diff --git a/.github/workflows/test-package.yml b/.github/workflows/test-package.yml
index 7b43164..71b304e 100644
--- a/.github/workflows/test-package.yml
+++ b/.github/workflows/test-package.yml
@@ -13,7 +13,7 @@ jobs:
fail-fast: false
matrix:
os: ['ubuntu-latest', 'macos-latest', 'windows-latest']
- python-version: ['3.9', '3.10', '3.11', '3.12']
+ python-version: ['3.10', '3.11', '3.12']
steps:
- uses: actions/checkout@v4
diff --git a/.github/workflows/test-wheel.yml b/.github/workflows/test-wheel.yml
index 8911374..d8207ed 100644
--- a/.github/workflows/test-wheel.yml
+++ b/.github/workflows/test-wheel.yml
@@ -13,7 +13,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- python-version: ['3.9', '3.10', '3.11', '3.12']
+ python-version: ['3.10', '3.11', '3.12']
steps:
- uses: actions/checkout@v4
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2f4f1b8..ede2a88 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,163 @@
+# 5.0.0 (2024/08/29)
+
+**THIS UPDATE CONTAINS SEVERAL CHANGES THAT ARE NOT BACKWARDS COMPATIBLE WITH CODE WRITTEN USING VERSION 4.x!**
+**SOME OF THE ARGUMENTS IN THE COMMAND LINE INTERFACE HAVE ALSO CHANGED OR BEEN REMOVED!**
+
+
+### Linear Kramers-Kronig tests
+
+- Renamed the `TestResult` class to `KramersKronigResult`.
+- Renamed the `perform_test` function to `perform_kramers_kronig_test`.
+- Removed the `perform_exploratory_tests` function. The `pyimpspec.analysis.kramers_kronig.evaluate_log_F_ext` function should be used instead.
+- The `complex`, `real`, and `imaginary` tests now use `numpy.linalg.lstsq`. The previous implementations based on matrix inversion are now accessible by appending `-inv` (e.g., `complex-inv`).
+- Updated the `perform_test` and `perform_exploratory_tests` function signatures (e.g., `test='real'`, `add_capacitance=True`, and `add_inductance=True` are now the new defaults).
+- Replaced the `--add-capacitance` and `--add-inductance` CLI arguments with `--no-capacitance` and `--no-inductance`. The new arguments have the same abbreviated forms, which means that `-C` and `-L` now have the opposite effect compared to before.
+- Added a variant of the mu-criterion algorithm that fits a logistic function to the mu-values (accessible via negative mu-criterion values).
+- Added a `suggest_num_RC` function for suggesting the optimum number of RC elements to use when performing linear Kramers-Kronig tests:
+ - Estimates the lower and upper limits for the range of numbers of RC elements to avoid under- and overfitting.
+ - Uses one or more methods/algorithms for suggesting the optimum number of RC elements:
+ - Method 1 (https://doi.org/10.1016/j.electacta.2014.01.034)
+ - Method 2 (https://doi.org/10.1109/IWIS57888.2022.9975131)
+ - Method 3 (https://doi.org/10.1109/IWIS57888.2022.9975131)
+ - Method 4 (https://doi.org/10.1109/IWIS57888.2022.9975131)
+ - Method 5 (https://doi.org/10.1016/j.electacta.2024.144951)
+ - Method 6 (https://doi.org/10.1016/j.electacta.2024.144951)
+ - Defaults to an approach that uses methods 4, 3, and 5 (in that order) to narrow the list of options.
+ - Multiple methods can be combined in different ways:
+ - mean of the suggested values
+ - sum of scores obtained based on rankings
+ - sum of relative scores
+- Added support for performing linear Kramers-Kronig tests on the admittance representation:
+ - Added `KramersKronigAdmittanceRC` element to represent the series RC element used in the equivalent circuit model.
+ - Added a boolean `admittance` attribute to the `TestResult` class.
+ - Added `get_parallel_resistance`, `get_parallel_capacitance`, and `get_parallel_inductance` methods to the `TestResult` class.
+ - Added a variant of the mu-criterion algorithm that uses capacitance values instead of resistance values when operating on the admittance representation.
+- Added `suggest_representation` function for suggesting either the impedance or admittance representation to use.
+- Added `evaluate_time_constant_extensions` function for optimizing the extension of the range of time constants.
+- Added the following arguments to the CLI:
+ - `--admittance` and `--impedance` to only perform tests on the admittance and impedance representation, respectively.
+  - `--suggestion-methods` to select one or more methods for suggesting the optimum number of RC elements.
+ - `--mean`, `--ranking`, and `--sum` to specify how to combine multiple methods for suggesting the optimum number of RC elements.
+ - `--num-F-ext-evaluations` to specify the number of evaluations to perform when optimizing the extension of the range of time constants.
+ - `--min-log-F-ext` and `--max-log-F-ext` to specify the lower and upper limits for the number of decades to extend the range of time constants when `--num-F-ext-evaluations` is set to something else than zero.
+ - `--log-F-ext` to specify the number of decades to extend the range of time constants when `--num-F-ext-evaluations` is set to zero.
+ - `--no-rapid-F-ext-evaluations` to evaluate the full range of the number of time constants at each sampled extension of the range of time constants.
+ - `--lower-limit`/`--upper-limit` to specify the lower/upper limit for the optimum number of RC elements to suggest.
+ - `--limit-delta` as an alternative way of specifying the limits of the range of optimum number of RC elements to suggest.
+ - `--plot-immittance` to automatically plot the corresponding immittance representation that was used when performing the linear Kramers-Kronig test.
+ - `--plot-pseudo-chi-squared` to override the plot when a single suggestion method has been chosen.
+ - `--plot-moving-average-width` to plot the moving averages of the residuals (the number of points must be provided).
+ - `--plot-estimated-noise` to include the estimated standard deviation of the noise.
+ - `--plot-log-F-ext-3d` and `--plot-log-F-ext-2d` to plot the pseudo chi-squared values as a function of the number of time constants and the extension of the range of time constants.
+ - `--plot-auto-limited-residuals` to automatically adjust the limits of the y-axes when plotting the relative residuals.
+- Added utility functions for subdividing frequency ranges and for calculating the curvatures of impedance spectra.
+- Updated the `perform_test` function to make use of the `perform_exploratory_tests`, `suggest_num_RC`, and `suggest_representation` functions.
+- Refactored the `perform_exploratory_tests` function to only perform tests with different numbers of RC elements.
+- Removed the `--automatic` argument from the CLI.
+- Updated the CLI to use similar plots both for exploratory results and when manually selecting a number of RC elements.
+- Removed the `mu` attribute from the `TestResult` class.
+- Fixed a bug in calculation of mu values that caused the series resistance to be included.
+- Some functions are no longer available at the top level of the package and must instead be accessed via the `pyimpspec.analysis.kramers_kronig` module.
+
+
+### Z-HIT analysis
+
+- Added support for performing Z-HIT analysis on admittance data.
+- Added a CLI argument for performing analyses on admittance data (`--admittance` or `-A`).
+- Added two smoothing algorithms (https://doi.org/10.1021/acsmeasuresciau.1c00054):
+ - `whithend`: Whittaker-Henderson
+ - `modsinc`: modified sinc kernel with linear extrapolation
+- Updated the default smoothing algorithm to be `modsinc`.
+- Added title to plot by default when performing analyses via the CLI.
+- Changed `statsmodels` from a required dependency to an optional dependency.
+- Added support for showing a plot of the residuals when using the CLI.
+
+
+### Fitting
+
+- Added an optional `timeout` argument to the `fit_circuit` function that can be used to set a time limit. This can be used to force the fitting process to timeout if it is taking a very long time to finish.
+- Added `--timeout` argument to the CLI.
+- Added `--type` argument to the CLI so that fit results can optionally be plotted as, e.g., just a Nyquist plot.
+
+
+### Distribution of relaxation times
+
+- Updated the TR-RBF implementation to be based off of a newer version of pyDRTtools:
+ - `lambda_value` is now automatically determined using a cross-validation method unless the new `cross_validation` argument is an empty string (i.e., `cross_validation=""`).
+ - If one of the cross-validation methods is chosen, then `lambda_value` is used as the initial value.
+ - The currently supported cross-validation (CV) methods are:
+ - `"gcv"` - generalized cross-validation (GCV)
+ - `"mgcv"` - modified GCV
+ - `"rgcv"` - robust GCV
+ - `"re-im"` - real-imaginary CV
+ - `"lc"` - L-curve
+ - See https://doi.org/10.1149/1945-7111/acbca4 for more information about the CV methods.
+- Removed the `maximum_symmetry` argument from the TR-RBF implementation.
+- Changed how timeouts and progress updates are implemented when the TR-RBF method is called with `credible_intervals=True` and `timeout` is greater than zero.
+- Some functions and classes are no longer available at the top level of the package and must instead be accessed via the `pyimpspec.analysis.drt` module.
+
+
+### Plotting
+
+- Added support for plotting admittance data:
+ - The affected plotting functions now have an optional, boolean `admittance` keyword argument.
+- Added a CLI argument for plotting admittance data (`--plot-admittance` or `-pY`).
+- Removed the `mpl.plot_mu_xps` function.
+- Added an `mpl.plot_pseudo_chisqr` function for plotting the pseudo chi-squared values of `TestResult` instances.
+- Updated the `mpl.plot_residuals` function to not use markers by default.
+- Fixed a bug that caused `mpl.plot_residuals` to have empty legend labels when no markers were used.
+- Updated how the limits are automatically determined by the `mpl.plot_residuals` function.
+- Updated how the ticks are determined in the y-axes of the `mpl.plot_residuals` function.
+- Added an `mpl.plot_suggestion` function that visualizes the suggested numbers of RC elements to use for linear Kramers-Kronig testing.
+- Added an `mpl.plot_suggestion_method` function that visualizes the data that is used to suggest the number of RC elements to use for linear Kramers-Kronig testing.
+- Removed support for colored axes from the `mpl.plot_nyquist` function.
+- Updated the `mpl.plot_nyquist` function to switch to using a marker when using `line=True` if all points are approximately the same.
+- Updated how the `--plot-type` CLI argument is handled when plotting, e.g., DRT results.
+- Added an `mpl.show` function that acts as a wrapper for `matplotlib.pyplot.show`.
+- Renamed the `plot_tests` function to `plot_kramers_kronig_tests`.
+- Renamed the `plot_complex` function to `plot_real_imaginary`.
+
+
+### Data parsing
+
+- Added support for parsing ZView/ZPlot `.z` files.
+- Added support for parsing PalmSens `.pssession` files.
+- Added support for two more variants of column headers to parsers that attempt to automatically identify columns.
+- Added support for using `pathlib.Path` in addition to `str` when passing paths to, e.g., the `parse_data` function.
+- Added `--output-indices` argument to the CLI to include zero-based indices in text output.
+- Added `--exclude-indices` argument to the CLI so that specific data points (e.g., outliers) can be excluded based on their zero-based indices.
+- Added `--nth-data-set` argument to the CLI so that one or more data sets can be chosen from a file.
+- Updated parsing of `.dta` files to support parsing the drift corrected impedances when it is available. The returned `List[DataSet]` is sorted so that the drift corrected impedance spectra have a lower index in the list than the uncorrected impedance spectra.
+- Fixed a bug that caused an exception to be raised when parsing a spreadsheet that also contained at least one empty sheet.
+- Fixed a bug that caused NumPy arrays in the dictionary returned by `DataSet.to_dict` to not be compatible with `json.dump` and `json.dumps` from the standard library.
+- Fixed a bug where `DataSet.from_dict` was unable to handle mask dictionaries where the keys were strings instead of integers.
+- Fixed a bug where the keyword arguments provided to `parse_data` were not being passed on to the different format parsers in a specific case.
+- Fixed a bug where detecting columns in files would fail if an otherwise valid column name started with whitespace.
+
+
+### Elements
+
+- Added the ZARC element, which is represented by `Zarc` in circuit description codes.
+- Added a `reset_default_parameter_values` function to reset the default parameter values of either all element types or specific element types.
+- Added `remove_elements` and `reset` functions to the `pyimpspec.circuit.registry` module that can be used to remove user-defined elements, or to remove user-defined elements and reset the default parameter values, respectively.
+- Added an optional `default_only` parameter to the `get_elements` function so that the function can return either all elements (including user-defined elements) or just the elements included in pyimpspec by default. The parameter is set to `False` by default, which means that all elements are returned.
+
+
+### Miscellaneous
+
+- Added `get` and `get_total` methods to the `Progress` class for obtaining the current step and the current total.
+- Added `register_default_handler` and `clear_default_handler_output` functions to the `progress` module.
+- Added mock data for circuit with negative differential resistance.
+- Added mock data for Randles circuit with diffusion.
+- Added noisy variants of mock data.
+- Added a `set_default_values` class method to circuit elements.
+- Refactored code.
+- Updated minimum versions of dependencies and removed support for Python 3.9.
+- Removed cvxpy from the list of supported optional dependencies.
+- Added `canvas` argument to `Circuit.to_drawing` method.
+- Changed some CLI argument names to improve consistency.
+
+
# 4.1.1 (2024/03/14)
- Maintenance release that updates the version requirements for dependencies.
diff --git a/COPYRIGHT b/COPYRIGHT
index f02fcd4..651614e 100644
--- a/COPYRIGHT
+++ b/COPYRIGHT
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
diff --git a/LICENSES/LICENSE-cvxpy.txt b/LICENSES/LICENSE-cvxpy.txt
deleted file mode 100644
index c645dd5..0000000
--- a/LICENSES/LICENSE-cvxpy.txt
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2017 Steven Diamond
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/LICENSES/LICENSE-nearestSPD.txt b/LICENSES/LICENSE-nearestSPD.txt
new file mode 100644
index 0000000..222805c
--- /dev/null
+++ b/LICENSES/LICENSE-nearestSPD.txt
@@ -0,0 +1,23 @@
+Copyright (c) 2013, John D'Errico
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/LICENSES/README.md b/LICENSES/README.md
index d6178b1..9404026 100644
--- a/LICENSES/README.md
+++ b/LICENSES/README.md
@@ -8,11 +8,6 @@
- License: GPLv3 or later
- Dependency.
-# cvxpy
-- https://github.com/cvxpy/cvxpy
-- License: Apache License version 2.0
-- Optional dependency.
-
# DRT-python-code
- https://github.com/akulikovsky/DRT-python-code
- License: GPLv3 or later
@@ -26,7 +21,7 @@
# impedance.py
- https://github.com/ECSHackWeek/impedance.py
- License: MIT
-- Copied code to implement linear Kramers-Kronig tests using matrix solvers.
+- Copied code to implement linear Kramers-Kronig tests for impedance representations of immittance data using matrix inversion.
# Jinja
- https://github.com/pallets/jinja/
@@ -48,6 +43,11 @@
- License: custom license
- Dependency.
+# nearestSPD
+- https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
+- License: BSD 2-clause
+- Ported to Python by the developers of pyDRTtools for use in the TR-RBF DRT method.
+
# numdifftools
- https://github.com/pbrod/numdifftools
- License: BSD 3-clause
@@ -112,3 +112,8 @@
- https://github.com/srstevenson/xdg
- License: ISC
- Dependency.
+
+# Smoothing algorithms: Whittaker-Henderson and modified sinc kernel
+- https://doi.org/10.1021/acsmeasuresciau.3c00017
+- License: GPLv3
+- Ported from Java to Python.
diff --git a/README.md b/README.md
index fdb3247..137cc01 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,6 @@ A package for parsing, validating, analyzing, and simulating impedance spectra.
[![GitHub](https://img.shields.io/github/license/vyrjana/pyimpspec)](https://www.gnu.org/licenses/gpl-3.0.html)
[![PyPI](https://img.shields.io/pypi/v/pyimpspec)](https://pypi.org/project/pyimpspec/)
-**Version 5.0.0 will be released soon. See [this branch](https://github.com/vyrjana/pyimpspec/tree/dev-5-0-0-rc) for the code (will eventually be merged into the main branch) and [this issue](https://github.com/vyrjana/pyimpspec/issues/9) for status updates.**
## Table of contents
@@ -70,7 +69,7 @@ See [CONTRIBUTORS](CONTRIBUTORS) for a list of people who have contributed to th
## License
-Copyright 2023 pyimpspec developers
+Copyright 2024 pyimpspec developers
Pyimpspec is licensed under the [GPLv3 or later](https://www.gnu.org/licenses/gpl-3.0.html).
diff --git a/build.sh b/build.sh
index b5f998a..a4a2d4d 100644
--- a/build.sh
+++ b/build.sh
@@ -1,6 +1,6 @@
#!/bin/bash
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -84,6 +84,9 @@ if [ "$(git status --porcelain=v1 | wc -l)" -ne 0 ]; then
fi
# Check for major issues
+# NOTE: May need to skip flake8 in some cases when trying to build. Currently
+# (2024-08-29) raising exceptions when running flake8 v7.1.1 on:
+# 'Python 3.12.4 (main, Jun 7 2024, 06:33:07) [GCC 14.1.1 20240522] on linux'
flake8 . --select=E9,F63,F7,F82 --show-source --statistics
echo "flake8 didn't find any issues..."
echo
diff --git a/create_venv.sh b/create_venv.sh
new file mode 100644
index 0000000..d2d9e67
--- /dev/null
+++ b/create_venv.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+target_folder=".venv/pyimpspec"
+if [ -d "$target_folder" ]; then
+ echo "The '$target_folder' folder already exists!"
+ exit
+fi
+
+echo "Initializing virtual environment in '$target_folder'"
+python3 -m venv "$target_folder"
+if [ $? -ne 0 ]; then
+ exit
+fi
+
+# Activating the virtual environment
+source "$target_folder/bin/activate"
+if [ $? -ne 0 ]; then
+ exit
+fi
+
+echo "Installing package (editable mode) without dependencies"
+# This will cause setup.py to refresh the dev-requirements.txt
+# and requirements.txt files.
+python3 -m pip install -e . --no-deps
+if [ $? -ne 0 ]; then
+ exit
+fi
+
+echo "Installing development dependencies"
+python3 -m pip install -r "dev-requirements.txt"
+if [ $? -ne 0 ]; then
+ exit
+fi
+
+echo "Installing package dependencies"
+python3 -m pip install -r "requirements.txt"
+if [ $? -ne 0 ]; then
+ exit
+fi
diff --git a/dev-requirements.txt b/dev-requirements.txt
index e26fe91..357df82 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -1,5 +1,5 @@
build~=1.2
-flake8~=7.0
-setuptools~=70.0
-sphinx~=7.3
-sphinx-rtd-theme~=2.0
+flake8~=7.1
+setuptools~=74.0
+sphinx~=8.0
+sphinx-rtd-theme~=2.0
\ No newline at end of file
diff --git a/docs/source/apidocs_circuit.rst b/docs/source/apidocs_circuit.rst
index adce77e..840a1d8 100644
--- a/docs/source/apidocs_circuit.rst
+++ b/docs/source/apidocs_circuit.rst
@@ -14,6 +14,9 @@ Functions
.. automodule:: pyimpspec
:members: get_elements, parse_cdc, simulate_spectrum, register_element
+.. automodule:: pyimpspec.circuit.registry
+ :members: remove_elements, reset, reset_default_parameter_values
+
Base classes
------------
diff --git a/docs/source/apidocs_drt.rst b/docs/source/apidocs_drt.rst
index 1b25d38..fbca888 100644
--- a/docs/source/apidocs_drt.rst
+++ b/docs/source/apidocs_drt.rst
@@ -16,37 +16,37 @@ Method functions and classes
BHT method
~~~~~~~~~~
-.. automodule:: pyimpspec
+.. automodule:: pyimpspec.analysis.drt
:members: calculate_drt_bht
-.. automodule:: pyimpspec
+.. automodule:: pyimpspec.analysis.drt
:members: BHTResult
m(RQ)fit method
~~~~~~~~~~~~~~~
-.. automodule:: pyimpspec
+.. automodule:: pyimpspec.analysis.drt
:members: calculate_drt_mrq_fit
-.. automodule:: pyimpspec
+.. automodule:: pyimpspec.analysis.drt
:members: MRQFitResult
TR-NNLS method
~~~~~~~~~~~~~~
-.. automodule:: pyimpspec
+.. automodule:: pyimpspec.analysis.drt
:members: calculate_drt_tr_nnls
-.. automodule:: pyimpspec
+.. automodule:: pyimpspec.analysis.drt
:members: TRNNLSResult
TR-RBF method
~~~~~~~~~~~~~
-.. automodule:: pyimpspec
+.. automodule:: pyimpspec.analysis.drt
:members: calculate_drt_tr_rbf
-.. automodule:: pyimpspec
+.. automodule:: pyimpspec.analysis.drt
:members: TRRBFResult
.. raw:: latex
diff --git a/docs/source/apidocs_kramers_kronig.rst b/docs/source/apidocs_kramers_kronig.rst
index 8ffd289..4ece5b7 100644
--- a/docs/source/apidocs_kramers_kronig.rst
+++ b/docs/source/apidocs_kramers_kronig.rst
@@ -5,16 +5,27 @@ Kramers-Kronig testing
A collection of functions and classes for performing Kramers-Kronig tests on data sets.
-
Functions
---------
.. automodule:: pyimpspec
- :members: perform_test, perform_exploratory_tests
+ :members: perform_kramers_kronig_test
+
+.. automodule:: pyimpspec
+ :members: perform_exploratory_kramers_kronig_tests
+
+.. automodule:: pyimpspec.analysis.kramers_kronig
+ :members: evaluate_log_F_ext, suggest_num_RC_limits, suggest_num_RC, suggest_representation
+
+.. automodule:: pyimpspec.analysis.kramers_kronig.algorithms
+ :members: suggest_num_RC_method_1, suggest_num_RC_method_2, suggest_num_RC_method_3, suggest_num_RC_method_4, suggest_num_RC_method_5, suggest_num_RC_method_6
+
+.. automodule:: pyimpspec.analysis.kramers_kronig.algorithms.utility
+ :members: calculate_curvatures, subdivide_frequencies
Class
-----
.. automodule:: pyimpspec
- :members: TestResult
+ :members: KramersKronigResult
.. raw:: latex
diff --git a/docs/source/apidocs_plot_mpl.rst b/docs/source/apidocs_plot_mpl.rst
index 1dd13fd..2879a31 100644
--- a/docs/source/apidocs_plot_mpl.rst
+++ b/docs/source/apidocs_plot_mpl.rst
@@ -12,7 +12,7 @@ Wrappers
These functions provide a high-level API for visualizing various objects/results (e.g., :class:`~pyimpspec.data.DataSet`).
.. automodule:: pyimpspec.mpl
- :members: plot_circuit, plot_data, plot_drt, plot_fit, plot_tests
+ :members: plot_circuit, plot_data, plot_drt, plot_fit, plot_kramers_kronig_tests, plot_log_F_ext
@@ -22,7 +22,7 @@ Primitives
These functions are used by the wrapper functions to make a more complex figure with multiple subplots.
.. automodule:: pyimpspec.mpl
- :members: plot_bht_scores, plot_bode, plot_complex, plot_gamma, plot_imaginary, plot_magnitude, plot_mu_xps, plot_nyquist, plot_phase, plot_real, plot_residuals
+ :members: plot_bht_scores, plot_bode, plot_real_imaginary, plot_gamma, plot_imaginary, plot_magnitude, plot_pseudo_chisqr, plot_num_RC_suggestion, plot_num_RC_suggestion_method, plot_nyquist, plot_phase, plot_real, plot_residuals
Examples
@@ -52,31 +52,22 @@ plot_circuit
import pyimpspec
from pyimpspec import mpl
- from pyimpspec.mock_data import EXAMPLE, TC1
- import matplotlib.pyplot as plt
from numpy import logspace, log10 as log
- f = EXAMPLE.get_frequencies()
- figure, axes = mpl.plot_circuit(TC1, frequencies=f, label="TC-1", title="", legend=False, colored_axes=True)
+ circuit = pyimpspec.generate_mock_circuits("CIRCUIT_1")[0]
+ data = pyimpspec.generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
+ f = data.get_frequencies()
+ figure, axes = mpl.plot_circuit(circuit, frequencies=f, label="TC-1", title="", legend=False, colored_axes=True)
figure.tight_layout()
- plt.show()
-
- data = pyimpspec.simulate_spectrum(
- TC1,
- logspace(
- log(max(f)),
- log(min(f)),
- num=int(log(max(f)) - log(min(f))) * 100 + 1,
- ),
- label="TC-1",
- )
+ mpl.show()
+
figure, axes = mpl.plot_nyquist(data, line=True)
figure.tight_layout()
- plt.show()
+ mpl.show()
figure, axes = mpl.plot_bode(data, line=True)
figure.tight_layout()
- plt.show()
+ mpl.show()
.. raw:: latex
@@ -94,21 +85,20 @@ plot_data
import pyimpspec
from pyimpspec import mpl
- from pyimpspec.mock_data import EXAMPLE
import pyimpspec.plot.colors as colors
- import matplotlib.pyplot as plt
- figure, axes = mpl.plot_data(EXAMPLE, legend=False, colored_axes=True)
+ data = pyimpspec.generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
+ figure, axes = mpl.plot_data(data, legend=False, colored_axes=True)
figure.tight_layout()
- plt.show()
+ mpl.show()
- figure, axes = mpl.plot_nyquist(EXAMPLE)
+ figure, axes = mpl.plot_nyquist(data)
figure.tight_layout()
- plt.show()
+ mpl.show()
- figure, axes = mpl.plot_bode(EXAMPLE)
+ figure, axes = mpl.plot_bode(data)
figure.tight_layout()
- plt.show()
+ mpl.show()
.. raw:: latex
@@ -119,7 +109,7 @@ plot_drt
~~~~~~~~
:func:`~pyimpspec.mpl.plot_drt`
-* :func:`~pyimpspec.mpl.plot_complex`
+* :func:`~pyimpspec.mpl.plot_real_imaginary`
* :func:`~pyimpspec.mpl.plot_gamma`
* :func:`~pyimpspec.mpl.plot_residuals`
@@ -127,45 +117,44 @@ plot_drt
import pyimpspec
from pyimpspec import mpl
- from pyimpspec.mock_data import EXAMPLE
import pyimpspec.plot.colors as colors
- import matplotlib.pyplot as plt
- drt = pyimpspec.calculate_drt_tr_nnls(EXAMPLE)
+ data = pyimpspec.generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
+ drt = pyimpspec.analysis.drt.calculate_drt_tr_nnls(data)
figure, axes = mpl.plot_drt(
drt,
- EXAMPLE,
+ data,
legend=False,
colored_axes=True,
)
figure.tight_layout()
- plt.show()
+ mpl.show()
- figure, axes = mpl.plot_complex(
- EXAMPLE,
+ figure, axes = mpl.plot_real_imaginary(
+ data,
colors={
"real": colors.COLOR_BLACK,
"imaginary": colors.COLOR_BLACK,
},
legend=False,
)
- _ = mpl.plot_complex(
+ _ = mpl.plot_real_imaginary(
drt,
line=True,
figure=figure,
axes=axes,
)
figure.tight_layout()
- plt.show()
+ mpl.show()
figure, axes = mpl.plot_gamma(drt)
figure.tight_layout()
- plt.show()
+ mpl.show()
figure, axes = mpl.plot_residuals(drt)
figure.tight_layout()
- plt.show()
+ mpl.show()
.. raw:: latex
@@ -184,24 +173,23 @@ plot_fit
import pyimpspec
from pyimpspec import mpl
- from pyimpspec.mock_data import EXAMPLE
import pyimpspec.plot.colors as colors
- import matplotlib.pyplot as plt
+ data = pyimpspec.generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
circuit = pyimpspec.parse_cdc("R(RC)(RW)")
- fit = pyimpspec.fit_circuit(circuit, data=EXAMPLE)
+ fit = pyimpspec.fit_circuit(circuit, data=data)
figure, axes = mpl.plot_fit(
fit,
- EXAMPLE,
+ data,
legend=False,
colored_axes=True,
)
figure.tight_layout()
- plt.show()
+ mpl.show()
figure, axes = mpl.plot_nyquist(
- EXAMPLE,
+ data,
colors={"impedance": colors.COLOR_BLACK},
legend=False,
)
@@ -212,10 +200,10 @@ plot_fit
axes=axes,
)
figure.tight_layout()
- plt.show()
+ mpl.show()
figure, axes = mpl.plot_bode(
- EXAMPLE,
+ data,
colors={
"magnitude": colors.COLOR_BLACK,
"phase": colors.COLOR_BLACK,
@@ -229,22 +217,23 @@ plot_fit
axes=axes,
)
figure.tight_layout()
- plt.show()
+ mpl.show()
figure, axes = mpl.plot_residuals(fit)
figure.tight_layout()
- plt.show()
+ mpl.show()
.. raw:: latex
\clearpage
-plot_tests
-~~~~~~~~~~
-:func:`~pyimpspec.mpl.plot_tests`
+plot_kramers_kronig_tests
+~~~~~~~~~~~~~~~~~~~~~~~~~
+:func:`~pyimpspec.mpl.plot_kramers_kronig_tests`
-* :func:`~pyimpspec.mpl.plot_mu_xps`
+* :func:`~pyimpspec.mpl.plot_pseudo_chisqr`
+* :func:`~pyimpspec.mpl.plot_num_RC_suggestion`
* :func:`~pyimpspec.mpl.plot_residuals`
* :func:`~pyimpspec.mpl.plot_nyquist`
* :func:`~pyimpspec.mpl.plot_bode`
@@ -253,54 +242,51 @@ plot_tests
import pyimpspec
from pyimpspec import mpl
- from pyimpspec.mock_data import EXAMPLE
import pyimpspec.plot.colors as colors
- import matplotlib.pyplot as plt
- mu_criterion = 0.85
- tests = pyimpspec.perform_exploratory_tests(
- EXAMPLE,
- mu_criterion=mu_criterion,
- add_capacitance=True,
- )
+ data = pyimpspec.generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
+ tests = pyimpspec.analysis.kramers_kronig.evaluate_log_F_ext(data)[0][1]
- figure, axes = mpl.plot_tests(
+ suggestion = pyimpspec.analysis.kramers_kronig.suggest_num_RC(tests)
+ test, scores, lower_limit, upper_limit = suggestion
+ figure, axes = mpl.plot_kramers_kronig_tests(
tests,
- mu_criterion,
- EXAMPLE,
+ suggestion,
+ data,
legend=False,
colored_axes=True,
)
figure.tight_layout()
- plt.show()
+ mpl.show()
- figure, axes = mpl.plot_mu_xps(
- tests,
- mu_criterion,
- )
+ figure, axes = mpl.plot_pseudo_chisqr(tests, lower_limit=lower_limit, upper_limit=upper_limit)
+ figure.tight_layout()
+ mpl.show()
+
+ figure, axes = mpl.plot_num_RC_suggestion(suggestion)
figure.tight_layout()
- plt.show()
+ mpl.show()
- figure, axes = mpl.plot_residuals(tests[0])
+ figure, axes = mpl.plot_residuals(test)
figure.tight_layout()
- plt.show()
+ mpl.show()
figure, axes = mpl.plot_nyquist(
- EXAMPLE,
+ data,
colors={"impedance": colors.COLOR_BLACK},
legend=False,
)
_ = mpl.plot_nyquist(
- tests[0],
+ test,
line=True,
figure=figure,
axes=axes,
)
figure.tight_layout()
- plt.show()
+ mpl.show()
figure, axes = mpl.plot_bode(
- EXAMPLE,
+ data,
colors={
"magnitude": colors.COLOR_BLACK,
"phase": colors.COLOR_BLACK,
@@ -308,13 +294,45 @@ plot_tests
legend=False,
)
_ = mpl.plot_bode(
- tests[0],
+ test,
line=True,
figure=figure,
axes=axes,
)
figure.tight_layout()
- plt.show()
+ mpl.show()
+
+.. raw:: latex
+
+ \clearpage
+
+
+plot_log_F_ext
+~~~~~~~~~~~~~~
+:func:`~pyimpspec.mpl.plot_log_F_ext`
+
+.. plot::
+
+ import pyimpspec
+ from pyimpspec import mpl
+ import pyimpspec.plot.colors as colors
+
+ data = pyimpspec.generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
+ evaluations = pyimpspec.analysis.kramers_kronig.evaluate_log_F_ext(data)
+
+ figure, axes = mpl.plot_log_F_ext(
+ evaluations,
+ projection="3d",
+ )
+ figure.tight_layout()
+ mpl.show()
+
+ figure, axes = mpl.plot_log_F_ext(
+ evaluations,
+ projection="2d",
+ )
+ figure.tight_layout()
+ mpl.show()
.. raw:: latex
diff --git a/docs/source/conf.py b/docs/source/conf.py
index c1fbfc1..85c591a 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -10,7 +10,7 @@
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = "pyimpspec"
-copyright = "2023, pyimpspec developers"
+copyright = "2024, pyimpspec developers"
author = "pyimpspec developers"
release = "X.Y.Z"
version_path = join(dirname(dirname(dirname(abspath(__file__)))), "version.txt")
diff --git a/docs/source/guide_cli.rst b/docs/source/guide_cli.rst
index 3cef8ec..4f9f336 100644
--- a/docs/source/guide_cli.rst
+++ b/docs/source/guide_cli.rst
@@ -36,24 +36,30 @@ These examples have been split across multiple lines here for the sake of format
# Parse some file for impedance spectra and perform Kramers-Kronig tests
# with a series capacitance and a mu-criterion of 0.7.
- # The perform_exploratory_tests function is used by default unless the
- # number of parallel RC elements is specified or the --automatic option
- # is used.
pyimpspec test "path to some file" \
- --add-capacitance \
+ --no-capacitance \
--mu-criterion 0.7
-The path to an input file can be replaced with special placeholder values:
+The path to an input file can be replaced with special placeholder values such as:
-- ``""``: test circuit 1 from `Boukamp (1995)`_
-- ``""``: a (simplified) Randles circuit
-- ``""``: a (simplified) Randles circuit with drift
+- ``""``: test circuit 1 from `Boukamp (1995)`_
+- ``""``: a simplified Randles circuit
+- ``""``: a simplified Randles circuit with drift
.. _`Boukamp (1995)`: https://doi.org/10.1149/1.2044210
+The wildcard ``*`` can also be used to select multiple immittance spectra.
+
+.. code:: bash
+
+ pyimpspec zhit ""
+
+
+Some settings can also be changed.
+
.. code:: bash
- pyimpspec zhit ""
+ pyimpspec zhit ""
A config file can be saved and then used to override the defaults arguments so that the arguments don't have to be explicitly specified in the terminal.
diff --git a/docs/source/guide_data.rst b/docs/source/guide_data.rst
index 9c9ba2c..cb2ae48 100644
--- a/docs/source/guide_data.rst
+++ b/docs/source/guide_data.rst
@@ -4,12 +4,14 @@ Data parsing
============
Individual impedance spectra are represented in pyimpspec as |DataSet| objects.
-The |parse_data| function acts as a wrapper for the various parsing functions available for different file formats:
+The |parse_data| function acts as a wrapper for the various parsing functions available for different file formats such as:
- BioLogic: ``.mpt``
- Eco Chemie: ``.dfr``
- Gamry: ``.dta``
- Ivium: ``.idf`` and ``.ids``
+- PalmSens: ``.pssession``
+- ZView: ``.z``
- Spreadsheets: ``.xlsx`` and ``.ods``
- Plain-text character-separated values (CSV)
@@ -52,8 +54,10 @@ Below is a Nyquist plot of some example data (test circuit 1 or TC-1 from `Bouka
.. plot::
from pyimpspec import mpl
- from pyimpspec.mock_data import EXAMPLE
- figure, axes = mpl.plot_nyquist(EXAMPLE)
+ from pyimpspec import generate_mock_data
+
+ data = generate_mock_data("CIRCUIT_1")[0]
+ figure, axes = mpl.plot_nyquist(data)
More information and examples about these functions can be found in the API documentation (:doc:`/apidocs_plot_mpl`).
@@ -112,13 +116,14 @@ Below are two Bode plots of the example above from just before and after the low
from pyimpspec import DataSet
from pyimpspec import mpl
- from pyimpspec.mock_data import EXAMPLE
- data = DataSet.duplicate(EXAMPLE)
+ from pyimpspec import generate_mock_data
+
+ data = generate_mock_data("CIRCUIT_1")[0]
figure, axes = mpl.plot_bode(data)
+
data.low_pass(1e3)
data.high_pass(1e1)
figure, axes = mpl.plot_bode(data)
- data.set_mask({})
.. raw:: latex
diff --git a/docs/source/guide_drt.rst b/docs/source/guide_drt.rst
index 3246773..8e63d5f 100644
--- a/docs/source/guide_drt.rst
+++ b/docs/source/guide_drt.rst
@@ -12,15 +12,17 @@ The DRT results can be used to, e.g., develop a suitable equivalent circuit.
Validation of impedance spectra using, e.g., Kramers-Kronig tests prior to proceeding with DRT analyses is highly recommended.
The instrument control software for your potentiostat/galvanostat may include tools for analyzing the excitation and response signals for indications of non-linear behavior.
+
+The supported methods
+---------------------
+
Implementations based on the following approaches are included in pyimpspec:
-- **BHT**: The Bayesian Hilbert transform method (see `Liu et al. (2020) `_) that was originally implemented in DRTtools_ and pyDRTtools_.
+- **BHT**: The Bayesian Hilbert transform method (see `Liu et al. (2020) `_) that was originally implemented in `DRTtools `_ and `pyDRTtools `_.
- **m(RQ)fit**: The multi-(RQ)-fit method (see `Boukamp (2015) `_ and `Boukamp and Rolle (2017) `_).
- **TR-NNLS**: Tikhonov regularization and non-negative least squares (see `Kulikovsky (2021) `_) that was originally implemented in `DRT-python-code `_.
-- **TR-RBF**: Tikhonov regularization and radial basis function (or piecewise linear) discretization (see `Wan et al. (2015) `_, `Ciucci and Chen (2015) `_, and `Effat and Ciucci (2017) `_) that was originally implemented in DRTtools_ and pyDRTtools_.
+- **TR-RBF**: Tikhonov regularization and radial basis function (or piecewise linear) discretization (see `Wan et al. (2015) `_, `Ciucci and Chen (2015) `_, `Effat and Ciucci (2017) `_, and `Maradesa et al. (2023) `_) that was originally implemented in `DRTtools <https://github.com/ciuccislab/DRTtools>`_ and `pyDRTtools <https://github.com/ciuccislab/pyDRTtools>`_.
-.. _DRTtools: https://github.com/ciuccislab/DRTtools
-.. _pyDRTtools: https://github.com/ciuccislab/pyDRTtools
.. note::
@@ -33,24 +35,34 @@ Implementations based on the following approaches are included in pyimpspec:
The BHT method makes use of random initial values for some of its calculations, which can produce different results when repeated with the same impedance spectrum.
+
+How to use
+----------
+
+Each method has its own function that can be used but there is also a wrapper function (|calculate_drt|) that takes a ``method`` argument.
+
.. doctest::
>>> from pyimpspec import (
+ ... DataSet,
... DRTResult, # An abstract class for DRT results
+ ... calculate_drt, # Wrapper function for all methods
+ ... generate_mock_data,
+ ... )
+ >>> from pyimpspec.analysis.drt import (
... BHTResult, # Result of the BHT method
... MRQFitResult, # Result of the m(RQ)fit method
... TRNNLSResult, # Result of the TR-NNLS method
... TRRBFResult, # Result of the TR-RBF method
- ... calculate_drt, # Wrapper function for all methods
... calculate_drt_bht, # BHT method
... calculate_drt_mrq_fit, # m(RQ)fit method
... calculate_drt_tr_nnls, # TR-NNLS method
... calculate_drt_tr_rbf, # TR-RBF method
... )
- >>> from pyimpspec.mock_data import EXAMPLE
>>>
- >>> drt: TRNNLSResult = calculate_drt_tr_nnls(EXAMPLE, lambda_value=1e-4)
- >>> drt: DRTResult = calculate_drt(EXAMPLE, method="tr-nnls", lambda_value=1e-4)
+ >>> data: DataSet = generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
+ >>> drt: TRNNLSResult = calculate_drt_tr_nnls(data, lambda_value=1e-4)
+ >>> drt: DRTResult = calculate_drt(data, method="tr-nnls", lambda_value=1e-4)
>>> assert isinstance(drt, TRNNLSResult)
Below are some figures demonstrating the results of the methods listed above when applied to the example data.
@@ -58,38 +70,41 @@ Below are some figures demonstrating the results of the methods listed above whe
.. plot::
from pyimpspec import (
+ fit_circuit,
+ parse_cdc,
+ generate_mock_data,
+ )
+ from pyimpspec.analysis.drt import (
calculate_drt_bht,
calculate_drt_mrq_fit,
calculate_drt_tr_nnls,
calculate_drt_tr_rbf,
- fit_circuit,
- parse_cdc,
)
from pyimpspec import mpl
- from pyimpspec.mock_data import EXAMPLE
def adjust_limits(ax):
ax.set_xlim(1e-5, 1e1)
ax.set_ylim(-100, 900)
- drt = calculate_drt_bht(EXAMPLE)
+ data = generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
+ drt = calculate_drt_bht(data)
figure, axes = mpl.plot_gamma(drt)
adjust_limits(axes[0])
figure.tight_layout()
circuit = parse_cdc("R(RQ)(RQ)")
- fit = fit_circuit(circuit, EXAMPLE)
- drt = calculate_drt_mrq_fit(EXAMPLE, fit.circuit, fit=fit)
+ fit = fit_circuit(circuit, data)
+ drt = calculate_drt_mrq_fit(data, fit.circuit, fit=fit)
figure, axes = mpl.plot_gamma(drt)
adjust_limits(axes[0])
figure.tight_layout()
- drt = calculate_drt_tr_nnls(EXAMPLE)
+ drt = calculate_drt_tr_nnls(data)
figure, axes = mpl.plot_gamma(drt)
adjust_limits(axes[0])
figure.tight_layout()
- drt = calculate_drt_tr_rbf(EXAMPLE)
+ drt = calculate_drt_tr_rbf(data)
figure, axes = mpl.plot_gamma(drt)
adjust_limits(axes[0])
figure.tight_layout()
@@ -109,6 +124,7 @@ References:
- Kulikovsky, A., 2021, J. Electrochem. Soc., 168, 044512 (https://doi.org/10.1149/1945-7111/abf508)
- Liu, J., Wan, T. H., and Ciucci, F., 2020, Electrochim. Acta, 357, 136864 (https://doi.org/10.1016/j.electacta.2020.136864)
- Wan, T. H., Saccoccio, M., Chen, C., and Ciucci, F., 2015, Electrochim. Acta, 184, 483-499 (https://doi.org/10.1016/j.electacta.2015.09.097)
+- Maradesa, A., Py, B., Wan, T.H., Effat, M.B., and Ciucci F., 2023, J. Electrochem. Soc., 170, 030502 (https://doi.org/10.1149/1945-7111/acbca4)
.. raw:: latex
diff --git a/docs/source/guide_fitting.rst b/docs/source/guide_fitting.rst
index 5772315..1f53b98 100644
--- a/docs/source/guide_fitting.rst
+++ b/docs/source/guide_fitting.rst
@@ -26,11 +26,12 @@ The |fit_circuit| function performs the fitting and returns a |FitResult| object
... FitResult,
... fit_circuit,
... parse_cdc,
+ ... generate_mock_data,
... )
- >>> from pyimpspec.mock_data import EXAMPLE
>>>
+ >>> data: DataSet = generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
>>> circuit: Circuit = parse_cdc("R(RC)(RW)")
- >>> fit: FitResult = fit_circuit(circuit, EXAMPLE)
+ >>> fit: FitResult = fit_circuit(circuit, data)
|fit_circuit| tries various combinations of iteration methods and weights by default to achieve the best fit.
It may still be necessary to adjust the initial values and/or the limits of the various parameters of the circuit elements.
@@ -41,19 +42,20 @@ The two figures below show the impedance spectrum and the fitted circuit as a Ny
.. plot::
from pyimpspec import (
- Circuit,
- DataSet,
FitResult,
fit_circuit,
parse_cdc,
+ generate_mock_data,
)
from pyimpspec import mpl
- from pyimpspec.mock_data import EXAMPLE
- circuit: Circuit = parse_cdc("R(RC)(RW)")
- fit: FitResult = fit_circuit(circuit, EXAMPLE)
- figure, axes = mpl.plot_nyquist(EXAMPLE, colors={"impedance": "black"})
- _ = mpl.plot_nyquist(fit, line=True, figure=figure, axes=axes)
+ data = generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
+ circuit = parse_cdc("R(RC)(RW)")
+ fit = fit_circuit(circuit, data)
+
+ figure, axes = mpl.plot_nyquist(data, colors={"impedance": "black"})
+ mpl.plot_nyquist(fit, line=True, figure=figure, axes=axes)
figure.tight_layout()
+
figure, axes = mpl.plot_residuals(fit)
figure.tight_layout()
@@ -73,11 +75,17 @@ Thus, it is quite easy to generate a table containing the fitted parameter value
>>> from schemdraw import Drawing
>>> from pandas import DataFrame
- >>> from pyimpspec import Circuit, FitResult, fit_circuit
- >>> from pyimpspec.mock_data import EXAMPLE
+ >>> from pyimpspec import (
+ ... Circuit,
+ ... DataSet,
+ ... FitResult,
+ ... fit_circuit,
+ ... generate_mock_data,
+ ... )
>>>
+ >>> data: DataSet = generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
>>> circuit: Circuit = parse_cdc("R(RC)(RW)")
- >>> fit: FitResult = fit_circuit(circuit, EXAMPLE)
+ >>> fit: FitResult = fit_circuit(circuit, data)
>>>
>>> fit.circuit.to_sympy()
R_0 + 1/(2*I*pi*C_2*f + 1/R_1) + 1/(sqrt(2)*sqrt(pi)*Y_4*sqrt(I*f) + 1/R_3)
@@ -144,11 +152,17 @@ The contents of ``parameters`` and ``statistics`` in the example above would be
>>> from schemdraw import Drawing
>>> from pandas import DataFrame
- >>> from pyimpspec import Circuit, FitResult, fit_circuit
- >>> from pyimpspec.mock_data import EXAMPLE
+ >>> from pyimpspec import (
+ ... Circuit,
+ ... DataSet,
+ ... FitResult,
+ ... fit_circuit,
+ ... generate_mock_data,
+ ... )
>>>
+ >>> data: DataSet = generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
>>> circuit: Circuit = parse_cdc("R(RC)(RW)")
- >>> fit: FitResult = fit_circuit(circuit, EXAMPLE)
+ >>> fit: FitResult = fit_circuit(circuit, data)
>>>
>>> fit.circuit.to_sympy()
R_0 + 1/(2*I*pi*C_2*f + 1/R_1) + 1/(sqrt(2)*sqrt(pi)*Y_4*sqrt(I*f) + 1/R_3)
diff --git a/docs/source/guide_installing.rst b/docs/source/guide_installing.rst
index aeb8cdd..5d51f62 100644
--- a/docs/source/guide_installing.rst
+++ b/docs/source/guide_installing.rst
@@ -16,7 +16,7 @@ The package **may** also work on other platforms depending on whether or not tho
Requirements
------------
-- `Python `_ (3.9, 3.10, 3.11, or 3.12)
+- `Python `_ (3.10, 3.11, or 3.12)
- The following Python packages
- `Jinja `_
@@ -34,62 +34,86 @@ Requirements
- `tabulate `_
- `xdg `_
-These Python packages (and their dependencies) are installed automatically when pyimpspec is installed using `pip `_.
+These Python packages (and their dependencies) are installed automatically when pyimpspec is installed using, e.g., `pip `_.
The following Python packages can be installed as optional dependencies for additional functionality:
- DRT calculations using the `TR-RBF method `_ (at least one of the following is required):
- `cvxopt `_
- `kvxopt `_ (this fork of cvxopt may support additional platforms)
- - `cvxpy `_
-
-.. note::
-
- Windows and MacOS users who wish to install CVXPY **must** follow the steps described in the `CVXPY documentation `_!
Installing
----------
-Make sure that Python and pip are installed first (see previous section for supported Python versions).
-For example, open a terminal and run the command:
+Make sure that both Python and pip are installed first (see previous section for supported Python versions).
+For example, open a terminal and run the following command to confirm that pip (or pipx) is indeed installed:
.. code:: bash
pip --version
+
.. note::
- If you only intend to use pyimpspec via the CLI or are familiar with `virtual environments `_, then you should consider using `pipx `_ instead of pip to install pyimpspec.
- Pipx will install pyimpspec inside of a virtual environment, which can help with preventing potential version conflicts that may arise if pyimpspec requires an older or a newer version of a dependency than another package.
- Pipx also manages these virtual environments and makes it easy to run applications/packages.
+ Using a Python `virtual environment `_ is highly recommended in order to avoid possible issues related to conflicting versions of dependencies installed on a system.
+ Such a virtual environment needs to be activated before running a script that imports a package installed inside the virtual environment.
+ The system-wide Python environment may also be `externally managed `_ in order to prevent the user from accidentally breaking that environment since the operating system depends upon the packages in that environment.
+
+ A third-party tool called `pipx `_ can automatically manage such virtual environments but it is primarily for installing programs that provide, e.g., a command-line interface (CLI) or a graphical user interface (GUI).
+ These programs can then be run without having to manually activate the virtual environment since pipx handles that.
+ The virtual environment would still need to be activated before running a script that imports pyimpspec and makes use of pyimpspec's application programming interface (API).
+
+If using pipx, then run the following command to make sure that pipx is available.
+If pipx is not available, then follow the `instructions to install pipx `_.
+
+.. code:: bash
+
+ pipx --version
-If there are no errors, then run the following command to install pyimpspec and its dependencies:
+If there are no errors, then run one of the following commands to install pyimpspec and its dependencies:
.. code:: bash
+ # If manually managing the virtual environment,
+ # follow the relevant pip documentation for creating
+ # and activating a virtual environment before running
+ # the following command.
pip install pyimpspec
+
+ # If pipx is used to automatically manage the virtual environment.
+ pipx install pyimpspec
+
+Pyimpspec should now be importable in, e.g., Python scripts and Jupyter notebooks provided that the virtual environment has been activated.
+
+If you wish to install the optional dependencies, then they can be specified explicitly when installing pyimpspec via pip:
+
+.. code:: bash
+
+ pip install pyimpspec[cvxopt]
-Pyimpspec should now be importable in, e.g., Python scripts and Jupyter notebooks.
-If you wish to install the optional dependencies, then they must either be specified explicitly when installing pyimpspec or installed separately later:
+Optional dependencies can also be installed after the fact if pipx was used:
.. code:: bash
- pip install pyimpspec[cvxpy]
+ pipx inject pyimpspec cvxopt
-Newer versions of pyimpspec can be installed at a later date by adding the ``--upgrade`` option to the command:
+
+Newer versions of pyimpspec can be installed in the following ways:
.. code:: bash
- pip install --upgrade pyimpspec
+ pip install pyimpspec --upgrade
+
+ pipx upgrade pyimpspec --include-injected
Using the API
-------------
-Pyimpspec should now be accessible in Python:
+Pyimpspec should be accessible in Python provided that the virtual environment has been activated:
.. doctest::
@@ -99,7 +123,7 @@ Pyimpspec should now be accessible in Python:
Running the CLI program
-----------------------
-You should now be able to run pyimpspec in a terminal:
+You should now also be able to access pyimpspec's CLI in a terminal:
.. code:: bash
diff --git a/docs/source/guide_kramers_kronig.rst b/docs/source/guide_kramers_kronig.rst
index e315c88..8ad27f5 100644
--- a/docs/source/guide_kramers_kronig.rst
+++ b/docs/source/guide_kramers_kronig.rst
@@ -3,104 +3,427 @@
Kramers-Kronig testing
======================
-One method for validating impedance spectra involves the use of Kramers-Kronig_ (KK) transforms.
-Implementations of the three variants of the linear KK transform tests described by `Boukamp (1995)`_ are included in pyimpspec.
-An implementation that uses complex non-linear least squares fitting is also included.
-These tests attempt to fit a generally applicable equivalent circuit (see circuit diagram below) where there can be an arbitrary number of parallel RC elements connected in series.
-This equivalent circuit is KK transformable, which means that if it can be fitted to the data, then the data should also be KK transformable.
-The capacitor and inductor connected in series are necessary for impedance spectra where the imaginary parts of the impedances at the low- and/or high-frequency limits, respectively, do not approach zero.
+One method for validating immittance spectra involves the use of Kramers-Kronig_ (KK) transforms.
+Implementations of the three variants of the linear KK tests (complex, real, and imaginary) described by `Boukamp (1995) `_ are included in pyimpspec.
+These three types of tests have been implemented using least squares fitting.
+Alternative implementations, which were the default implementation before version 5.0.0, based on matrix inversion are also included.
+An implementation that uses complex non-linear least squares fitting is also included, but this tends to be significantly slower than any of the other implementations.
+These tests attempt to fit generally applicable equivalent circuit models (ECM, see the two circuit diagrams below).
+These ECMs are KK transformable, which means that if they can be fitted to the data with small, random residuals, then the data should also be KK transformable.
-.. _`Boukamp (1995)`: https://doi.org/10.1149/1.2044210
+The ECM that is used for the impedance representation of the immittance data is shown below and it can contain many parallel RC elements connected in series.
+The capacitor and inductor connected in series may be necessary for impedance spectra where the imaginary parts do not approach zero at the low- and high-frequency limits, respectively.
.. plot::
- :alt: The type of circuit that is used to check for Kramers-Kronig compliance: a resistance, capacitance, and inductor connected in series to an arbitrary number of parallel RC elements that are also connected in series.
+ :alt: The type of circuit that is used to check impedance data for Kramers-Kronig compliance: a resistance, capacitance, and inductor connected in series to an arbitrary number of parallel RC elements that are connected in series.
from pyimpspec import parse_cdc
circuit = parse_cdc("R(RC)(RC)CL")
elements = circuit.get_elements()
custom_labels = {
elements[0]: r"$R_{\rm ser}$",
- elements[1]: r"$R_i$",
- elements[2]: r"$C_i$",
- elements[3]: r"$R_n$",
- elements[4]: r"$C_n$",
+ elements[1]: r"$R_1$",
+ elements[2]: r"$C_1$",
+ elements[3]: r"$R_k$",
+ elements[4]: r"$C_k$",
elements[5]: r"$C_{\rm ser}$",
elements[6]: r"$L_{\rm ser}$",
}
circuit.to_drawing(custom_labels=custom_labels).draw()
+For the admittance representation of the immittance data, the following equivalent circuit is used instead.
+Here, many series RC elements are connected in parallel.
+Similarly to the circuit shown above, a parallel capacitor and/or a parallel inductor may be needed for some admittance spectra.
+
+.. plot::
+ :alt: The type of circuit that is used to check admittance data for Kramers-Kronig compliance: a resistance, capacitance, and inductor connected in parallel to an arbitrary number of series RC elements that are connected in parallel.
+
+ from pyimpspec import parse_cdc
+ circuit = parse_cdc("(R[RC][RC]CL)")
+ elements = circuit.get_elements()
+ custom_labels = {
+ elements[0]: r"$R_{\rm par}$",
+ elements[1]: r"$R_1$",
+ elements[2]: r"$C_1$",
+ elements[3]: r"$R_k$",
+ elements[4]: r"$C_k$",
+ elements[5]: r"$C_{\rm par}$",
+ elements[6]: r"$L_{\rm par}$",
+ }
+ circuit.to_drawing(custom_labels=custom_labels).draw()
+
A few things to keep in mind about this approach to KK testing:
-- The fitted circuit has no physical significance.
-- An appropriate number of parallel RC elements (i.e., the number of time constants) should be chosen to avoid over- and underfitting (i.e., fitting to the noise or not fitting at all, respectively).
-- Each parallel RC element is replaced with an element where the time constant, :math:`\tau=RC`, is fixed but the resistance, :math:`R`, is still variable (i.e., from :math:`Z=\frac{R}{1+j 2 \pi f R C}` to :math:`Z=\frac{R}{1+j 2 \pi f \tau}`).
+- The fitted circuits have no physical significance and some of the fitted parameters may end up with negative values.
+- Each parallel/series RC element is replaced with an element where the time constant, :math:`\tau=RC`, is fixed but either the resistance, :math:`R`, or the capacitance, :math:`C`, is still variable for the impedance and admittance representations, respectively.
+
+ - :math:`Z(\omega)=\frac{R}{1+j \omega R C}` becomes :math:`Z(\omega)=\frac{R}{1+j \omega \tau}` when operating on the impedance representation.
+ - :math:`Y(\omega)=\frac{C \omega}{\omega R C - j}` becomes :math:`Y(\omega)=\frac{C \omega}{\omega \tau - j}` when operating on the admittance representation.
+
+
+Either the complex or the real test should be used.
+Obtaining good fits with the imaginary test can be challenging even when the immittance spectrum is known to be valid.
+The error is spread out across the real and imaginary parts when using the complex test, which can make it more difficult to spot issues compared to the real test where the error is concentrated on the imaginary part.
+However, the real test can be overly sensitive by comparison and one should keep in mind that, e.g., slight increases in the magnitudes of the residuals at the frequency extremes might be resolved by choosing a more appropriate data representation or optimizing the range of time constants.
+Many immittance spectra can be validated using the range of time constants within the bounds defined by the inverse of the maximum and the minimum excitation frequencies.
+However, in some cases it is necessary to extend the range of time constants by some factor :math:`F_{\rm ext} > 1` so that :math:`\tau \in [\frac{1}{F_{\rm ext} \omega_{\rm max}}, \frac{F_{\rm ext}}{\omega_{\rm min}}]` where :math:`\omega_{\rm max}` and :math:`\omega_{\rm min}` are the maximum and minimum, respectively, of the measured angular frequencies.
+The range may also need to be contracted (i.e., :math:`F_{\rm ext} < 1`).
+Pyimpspec includes an implementation for automatically optimizing |F_ext| and whether or not the suggested |F_ext| is appropriate can be assessed with the help of a 3D plot of |log pseudo chi-squared| as a function of |N_tau| and |log F_ext|.
+
+.. plot::
+
+ from pyimpspec import (
+ generate_mock_data,
+ mpl,
+ )
+ from pyimpspec.analysis.kramers_kronig import evaluate_log_F_ext
+
+ data = generate_mock_data("CIRCUIT_4", noise=5e-2, seed=42)[0]
+ evaluations = evaluate_log_F_ext(data, min_log_F_ext=-1.0, max_log_F_ext=1.0, num_F_ext_evaluations=20)
+ figure, axes = mpl.plot_log_F_ext(evaluations)
+ figure.tight_layout()
+
+ figure, axes = mpl.plot_log_F_ext(evaluations, projection="2d", legend=False)
+ figure.tight_layout()
+
+In the example above, the default range of time constants (:math:`\log{F_{\rm ext}} = 0`) exhibits a wide range of |N_tau| (:math:`8 < N_\tau < 45`) with a gradual decrease of |pseudo chi-squared|.
+An extended range of time constants (:math:`\log{F_{\rm ext}} = 0.394`, purple markers) is found to be optimal since it achieves a similarly low |pseudo chi-squared| with a lower |N_tau|.
+
+An optimum number of parallel/series RC elements (i.e., the number of time constants or |N_tauopt|) should be chosen to avoid over- and underfitting (i.e., fitting to the noise or not fitting to the data, respectively).
+Pyimpspec implements multiple methods for suggesting |N_tauopt|:
+
+.. list-table:: Methods for suggesting the optimum number of time constants (i.e., the number of parallel/series RC elements).
+ :header-rows: 1
+
+  * - Method
+    - Reference
+  * - 1: |mu|-criterion
+    - `Schönleber et al. (2014) <https://doi.org/10.1016/j.electacta.2014.01.034>`_
+  * - 2: norm of fitted variables
+    - `Plank et al. (2022) <https://doi.org/10.1109/IWIS57888.2022.9975131>`_
+  * - 3: norm of curvatures
+    - `Plank et al. (2022) <https://doi.org/10.1109/IWIS57888.2022.9975131>`_
+  * - 4: number of sign changes among curvatures
+    - `Plank et al. (2022) <https://doi.org/10.1109/IWIS57888.2022.9975131>`_
+  * - 5: mean distance between sign changes among curvatures
+    - `Yrjänä and Bobacka (2024) <https://doi.org/10.1016/j.electacta.2024.144951>`_
+  * - 6: apex of |log sum abs tau R| (or |log sum abs tau C|) *versus* |N_tau|
+    - `Yrjänä and Bobacka (2024) <https://doi.org/10.1016/j.electacta.2024.144951>`_
.. note::
- Impedance data that include negative differential resistances cannot be validated directly using the included implementations of the linear Kramers-Kronig tests.
- Adding a parallel resistance of suitable magnitude to the impedance data should produce impedance data that can be validated.
+ The implementations of methods 1, 3, and 4 include some `modifications `_ to make them more robust, but these modifications can be disabled.
+
-There are three approaches available for selecting the number of parallel RC elements in pyimpspec:
+The default approach combines the three methods that are based on the curvatures of the immittance spectrum of the fitted ECM in order to:
-- Manually specifying the number.
-- Using the algorithm described by `Schönleber et al. (2014)`_, which requires choosing a |mu|-criterion value (0.0 to 1.0 where the limits represent over- and underfitting, respectively). See `Lin-KK Tool`_ for an implementation released by that group.
-- An alternative implementation of the algorithm above with additional weighting to help avoid false negatives in some circumstances.
+- minimize the number of sign changes of the curvatures (method 4)
+- minimize the norm of the curvatures (method 3)
+- maximize the mean distance between sign changes of the curvatures (method 5)
-.. _`Schönleber et al. (2014)`: https://doi.org/10.1016/j.electacta.2014.01.034
+Each method represents a stage that is used to narrow down suitable |N_tau| until one remains.
+It is also possible to either choose which method(s) to use or to pick a specific number of time constants manually.
-There are two functions for performing KK tests: |perform_test| and |perform_exploratory_tests|.
-The |perform_test| function returns a single |TestResult| object and can be used to perform the test with either of the first two approaches to choosing the number of parallel RC elements.
-The |perform_exploratory_tests| returns a list of |TestResult| objects, which are sorted from the highest to the lowest scoring result based on the distance of |mu| from the |mu|-criterion and how good the fit is.
+Pyimpspec also includes automatic estimation of the lower and upper limits for |N_tauopt| in order to reduce the probability of suggesting an |N_tauopt| that is either too small or too large.
+The lower limit is estimated using a plot of |log pseudo chi-squared| as a function of |N_tau| while the upper limit is estimated with the help of method 5 (i.e., the mean distances between sign changes of the curvature of the impedance spectra of the fitted ECMs).
+Either limit, both limits, and/or the difference between the limits can also be specified manually.
+
+
+How to use
+----------
+
+A KK test can be performed by calling the |perform_kramers_kronig_test| function, which returns a |KramersKronigResult| object. This function acts as a wrapper for several other functions that can also be called individually: |evaluate_log_F_ext|, |suggest_num_RC_limits|, |suggest_num_RC|, and |suggest_representation|.
+
+The |evaluate_log_F_ext| function attempts to optimize the range of time constants (i.e., optimize |F_ext|), but the value of |F_ext| can also be specified explicitly.
+A list of |KramersKronigResult| can be supplied to the |suggest_num_RC_limits| and |suggest_num_RC| functions.
+The former function will return the estimated lower and upper limits (|N_taumin| and |N_taumax|, respectively) of |N_tau| where |N_tauopt| is likely to exist.
+The latter function will return a tuple containing the suggested |KramersKronigResult| instance (i.e., the one that corresponds to |N_tauopt|), a dictionary that maps the numbers of time constants to the scores that were used to suggest |N_tauopt|, and the estimated |N_taumin| and |N_taumax|.
+A list of these tuples, where each tuple corresponds to a KK test that was performed on either the impedance or the admittance representation, can then be provided to the |suggest_representation| function.
+If |perform_kramers_kronig_test| is called with ``admittance=None``, then both the impedance and the admittance representation are tested.
+Otherwise, only either the impedance (``admittance=False``) or the admittance (``admittance=True``) is tested.
.. doctest::
>>> from pyimpspec import (
- ... TestResult,
- ... perform_exploratory_tests,
- ... perform_test,
+ ... DataSet,
+ ... KramersKronigResult,
+ ... generate_mock_data,
+ ... perform_kramers_kronig_test,
... )
- >>> from pyimpspec.mock_data import EXAMPLE
- >>> from typing import List
+ >>> from pyimpspec.analysis.kramers_kronig import (
+ ... evaluate_log_F_ext,
+ ... suggest_num_RC,
+ ... suggest_representation,
+ ... )
+ >>> from typing import Dict, List, Tuple
+ >>>
+ >>>
+ >>> data: DataSet = generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
+ >>>
+ >>> test: KramersKronigResult # The suggested result
+ >>> test = perform_kramers_kronig_test(data)
+ >>> # The line above is equivalent to the lines below
+ >>> # in terms of the work that is performed
+ >>>
+ >>> Z_evaluations: List[Tuple[float, List[KramersKronigResult], float]]
+ >>> Z_evaluations = evaluate_log_F_ext(data, admittance=False)
+ >>>
+ >>> Z_suggested_F_ext: float
+ >>> Z_tests: List[KramersKronigResult]
+ >>> Z_minimized_statistic: float
+ >>> Z_suggested_F_ext, Z_tests, Z_minimized_statistic = Z_evaluations[0]
+ >>>
+ >>> Z_suggestion: Tuple[KramersKronigResult, Dict[int, float], int, int]
+ >>> Z_suggestion = suggest_num_RC(Z_tests)
+ >>>
+ >>> Y_evaluations: List[Tuple[float, List[KramersKronigResult], float]]
+ >>> Y_evaluations = evaluate_log_F_ext(data, admittance=True)
+ >>>
+ >>> Y_tests: List[KramersKronigResult] = Y_evaluations[0][1]
+ >>>
+ >>> Y_suggestion: Tuple[KramersKronigResult, Dict[int, float], int, int]
+ >>> Y_suggestion = suggest_num_RC(Y_tests)
+ >>>
+ >>> suggestion: Tuple[KramersKronigResult, Dict[int, float], int, int]
+ >>> suggestion = suggest_representation([Z_suggestion, Y_suggestion])
+ >>>
+ >>> scores: Dict[int, float] # Scores for various numbers of RC elements
+ >>> lower_limit: int
+ >>> upper_limit: int
+ >>> test, scores, lower_limit, upper_limit = suggestion
+
+
+A single |KramersKronigResult| can be plotted on its own, but it is also possible to plot the suggested |KramersKronigResult| along with the |pseudo chi-squared| values of all |KramersKronigResult| instances so that one can see if the suggested |KramersKronigResult| is indeed the best choice.
+
+
+.. plot::
+
+ from pyimpspec import generate_mock_data
+ from pyimpspec.analysis.kramers_kronig import evaluate_log_F_ext, suggest_num_RC
+ from pyimpspec import mpl
+
+ data = generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
+ tests = evaluate_log_F_ext(data)[0][1]
+ suggestion = suggest_num_RC(tests)
+
+ figure, axes = mpl.plot_kramers_kronig_tests(
+ tests,
+ suggestion,
+ data,
+ legend=False,
+ colored_axes=True,
+ )
+ figure.tight_layout()
+
+
+From the top-left plot one can see that the estimated lower and upper limits define a range of |N_tau| values (filled circles, the y-axis on the left-hand side) where |N_tauopt| is estimated to exist.
+The y-axis on the right-hand side shows the scores assigned based on an approach that makes use of methods 3, 4, and 5.
+These scores are then used to suggest |N_tauopt| (dashed line).
+
+The |perform_kramers_kronig_test| function takes keyword arguments that can be passed on to the |suggest_num_RC| function.
+This can be used to, e.g., select which method(s) to use or to adjust any method-specific settings such as the |mu|-criterion of method 1.
+
+.. doctest::
+
+ >>> from pyimpspec import (
+ ... DataSet,
+ ... KramersKronigResult,
+ ... generate_mock_data,
+ ... perform_kramers_kronig_test,
+ ... )
+ >>> from pyimpspec.analysis.kramers_kronig import (
+ ... evaluate_log_F_ext,
+ ... suggest_num_RC,
+ ... )
+    >>> from typing import Dict, List, Tuple
>>>
>>> mu_criterion: float = 0.85
- >>> test: TestResult = perform_test(EXAMPLE, mu_criterion=mu_criterion)
- >>> tests: List[TestResult] = perform_exploratory_tests(
- ... EXAMPLE,
+ >>>
+ >>> data: DataSet = generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
+ >>>
+ >>> test: KramersKronigResult
+ >>> test = perform_kramers_kronig_test(data, mu_criterion=mu_criterion)
+ >>> # The above is equivalent to the following lines
+ >>> # in terms of the work that is performed
+ >>>
+ >>> evaluations: List[Tuple[float, List[KramersKronigResult], float]]
+ >>> evaluations = evaluate_log_F_ext(data)
+ >>>
+ >>> optimum_log_Fext: Tuple[float, List[KramersKronigResult], float]
+ >>> optimum_log_Fext = evaluations[0]
+ >>>
+ >>> tests: List[KramersKronigResult] = optimum_log_Fext[1]
+ >>> suggestion: Tuple[KramersKronigResult, Dict[int, float], int, int] = suggest_num_RC(
+ ... tests,
... mu_criterion=mu_criterion,
- ... ) # tests[0] is the highest-scoring result
+ ... )
+ >>>
+ >>> scores: Dict[int, float]
+ >>> lower_limit: int
+ >>> upper_limit: int
+ >>> test, scores, lower_limit, upper_limit = suggestion
+
+
+The plot of relative residuals is typically used to interpret the validity of the immittance spectrum that was tested. Alternatively, statistical tests performed on the residuals can also be used.
+
+.. doctest::
+
+ >>> from pyimpspec import (
+ ... DataSet,
+ ... KramersKronigResult,
+ ... generate_mock_data,
+ ... perform_kramers_kronig_test,
+ ... )
+ >>> from pyimpspec.analysis.kramers_kronig import (
+ ... evaluate_log_F_ext,
+ ... suggest_num_RC,
+ ... suggest_representation,
+ ... )
+ >>> from typing import List, Tuple
+ >>>
+ >>>
+ >>> data: DataSet = generate_mock_data("CIRCUIT_1", noise=5e-2, seed=42)[0]
+ >>>
+ >>> test: KramersKronigResult # The suggested result
+ >>> test = perform_kramers_kronig_test(data)
+ >>> statistics: str = test.to_statistics_dataframe().to_markdown(index=False)
-The three figures below present the results of using |perform_exploratory_tests| (i.e., the last of the three approaches listed above) with the example data.
-The first figure plots |mu| and |pseudo chi-squared| as a function of the number of parallel RC elements.
-From this plot one can see that 19 seems to be an appropriate number of parallel RC elements to use in this case.
-The second figure plots the relative residuals of the real and imaginary parts of the impedances of the fitted circuit and the example data.
-From this plot one can see that the residuals are small and randomly distributed around zero, which is what one would hope to see for an impedance spectrum with low noise (or none at all).
-The third figure plots the impedance spectrum and the fitted circuit as a Nyquist plot.
-From this plot one can see that the fit is indeed good.
+
+The contents of ``statistics`` would look something like:
+
+.. code::
+
+ | Label | Value |
+ |:----------------------------------------------------|--------------:|
+ | Log pseudo chi-squared | -4.46966 |
+ | Number of RC elements | 13 |
+ | Log Fext (extension factor for time constant range) | -0.100386 |
+ | Series resistance (ohm) | 103.525 |
+ | Series capacitance (F) | 0.0120676 |
+ | Series inductance (H) | -2.24707e-06 |
+ | Mean of residuals, real (% of |Z|) | -5.25249e-06 |
+ | Mean of residuals, imag. (% of |Z|) | 0.00220288 |
+ | SD of residuals, real (% of |Z|) | 0.0523757 |
+ | SD of residuals, imag. (% of |Z|) | 0.075694 |
+ | Residuals within 1 SD, real (%) | 68.2927 |
+ | Residuals within 1 SD, imag. (%) | 58.5366 |
+ | Residuals within 2 SD, real (%) | 97.561 |
+ | Residuals within 2 SD, imag. (%) | 95.122 |
+ | Residuals within 3 SD, real (%) | 100 |
+ | Residuals within 3 SD, imag. (%) | 100 |
+ | Lilliefors test p-value, real | 0.252269 |
+ | Lilliefors test p-value, imag. | 0.513698 |
+ | Shapiro-Wilk test p-value, real | 0.591578 |
+ | Shapiro-Wilk test p-value, imag. | 0.168292 |
+ | Estimated SD of Gaussian noise (% of |Z|) | 0.0643079 |
+ | One-sample Kolmogorov-Smirnov test p-value, real | 0.871214 |
+ | One-sample Kolmogorov-Smirnov test p-value, imag. | 0.60763 |
+
+
+All three statistical tests (`Lilliefors <https://en.wikipedia.org/wiki/Lilliefors_test>`_, `Shapiro-Wilk <https://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test>`_, and `Kolmogorov-Smirnov <https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test>`_) return :math:`p`-values greater than 0.05 (our chosen threshold) for the residuals of both the real and the imaginary parts. The means of the residuals are close to zero as well. All of this indicates that the tested immittance spectrum is likely to be valid. This is also in agreement with the interpretation based on inspecting the plot of the relative residuals.
+
+
+Some immittance spectra might not be possible to validate based on testing the impedance representation.
+For example, the Nyquist plot below shows a synthetic impedance spectrum that includes a negative differential resistance (the larger, outer loop that goes from the right-hand side to the left-hand side as the frequency is decreased).
+Similar impedance spectra have been reported when measuring, e.g., `in the passive region of a system with a tantalum working electrode in hydrofluoric acid (Fig. 3b in the reference) `_.
.. plot::
- from pyimpspec import perform_exploratory_tests
+ from pyimpspec import mpl, generate_mock_data
+
+ data = generate_mock_data("CIRCUIT_8", noise=5e-2, seed=42)[0]
+ figure, axes = mpl.plot_nyquist(data)
+ figure.tight_layout()
+
+
+Attempting to perform Kramers-Kronig tests on this impedance data as shown in the previous example incorrectly indicates that the spectrum is not linear, causal, and stable.
+
+.. plot::
+
+ from pyimpspec import generate_mock_data
+ from pyimpspec.analysis.kramers_kronig import evaluate_log_F_ext, suggest_num_RC
from pyimpspec import mpl
- from pyimpspec.mock_data import EXAMPLE
- mu_criterion = 0.85
- tests = perform_exploratory_tests(EXAMPLE, mu_criterion=mu_criterion, add_capacitance=True)
- figure, axes = mpl.plot_mu_xps(tests, mu_criterion=mu_criterion)
+ data = generate_mock_data("CIRCUIT_8", noise=5e-2, seed=42)[0]
+ tests = evaluate_log_F_ext(data)[0][1]
+ suggestion = suggest_num_RC(tests)
+
+ figure, axes = mpl.plot_kramers_kronig_tests(tests, suggestion, data, legend=False, colored_axes=True)
figure.tight_layout()
+
+
+However, there are two approaches that can be used to successfully validate this impedance spectrum.
+The first approach is to perform the Kramers-Kronig tests on the admittance data either explicitly (i.e., by specifying ``admittance=True`` when calling the |perform_kramers_kronig_test| and |evaluate_log_F_ext| functions) or by calling |perform_kramers_kronig_test| with ``admittance=None`` (default value). The latter should then test both the impedance and the admittance representation before ultimately suggesting the result for the most appropriate representation, which in this case is the admittance representation.
+
+.. plot::
+
+ from pyimpspec import generate_mock_data
+ from pyimpspec.analysis.kramers_kronig import evaluate_log_F_ext, suggest_num_RC
+ from pyimpspec import mpl
+
+ data = generate_mock_data("CIRCUIT_8", noise=5e-2, seed=42)[0]
+ tests = evaluate_log_F_ext(data, admittance=True)[0][1]
+ suggestion = suggest_num_RC(tests)
- figure, axes = mpl.plot_residuals(tests[0])
+ figure, axes = mpl.plot_kramers_kronig_tests(tests, suggestion, data, legend=False, colored_axes=True)
figure.tight_layout()
+
+
+The second approach is to add a parallel resistance of a suitable magnitude to the impedance data and to perform the Kramers-Kronig tests on the resulting impedance data.
+
+.. plot::
+
+ from pyimpspec import parse_cdc
+ # A Warburg impedance is used here just to have two different symbols
+ circuit = parse_cdc("(WR)")
+ elements = circuit.get_elements()
+ custom_labels = {
+ elements[0]: r"$Z_{\rm data}$",
+ elements[1]: r"$R_{\rm par}$",
+ }
+ circuit.to_drawing(custom_labels=custom_labels).draw()
+
+
+The resistance, :math:`R_{\rm par}`, is known *a priori* to be KK transformable.
+Adding the resistance in parallel to the experimental data, which is represented in this circuit diagram as :math:`Z_{\rm data}`, does not negatively affect the compliance of the resulting circuit.
+Thus, the KK compliance of the resulting circuit is dependent on whether or not :math:`Z_{\rm data}` is KK compliant.
+
+.. note::
+
+ The magnitude of the resistance to choose depends on the original impedance data.
+ In this example, the real part of the impedance at the lowest frequency in the original data is approximately :math:`-100` |ohm|.
+ A value of 50 |ohm| was chosen for the parallel resistance after testing a few different values.
+
+As can be seen from the results below, the new, and thus also the original, impedance data has been validated successfully.
+
+.. plot::
+
+ from pyimpspec import Resistor, DataSet, generate_mock_data
+ from pyimpspec.analysis.kramers_kronig import evaluate_log_F_ext, suggest_num_RC
+ from pyimpspec import mpl
- figure, axes = mpl.plot_nyquist(tests[0], line=True)
- _ = mpl.plot_nyquist(EXAMPLE, figure=figure, axes=axes, colors={"impedance": "black"})
+ data = generate_mock_data("CIRCUIT_8", noise=5e-2, seed=42)[0]
+ f = data.get_frequencies()
+ Z_data = data.get_impedances()
+ R = Resistor(R=50)
+ Z_res = R.get_impedances(f)
+    data = DataSet(frequencies=f, impedances=1/(1/Z_data + 1/Z_res), label=rf"With parallel R={R.get_value('R'):.0f} $\Omega$")
+
+ tests = evaluate_log_F_ext(data)[0][1]
+ suggestion = suggest_num_RC(tests)
+
+ figure, axes = mpl.plot_kramers_kronig_tests(tests, suggestion, data, legend=False, colored_axes=True)
figure.tight_layout()
+
References:
- Boukamp, B.A., 1995, J. Electrochem. Soc., 142, 1885-1894 (https://doi.org/10.1149/1.2044210)
- Schönleber, M., Klotz, D., and Ivers-Tiffée, E., 2014, Electrochim. Acta, 131, 20-27 (https://doi.org/10.1016/j.electacta.2014.01.034)
+- Plank, C., Rüther, T., and Danzer, M.A., 2022, 2022 International Workshop on Impedance Spectroscopy (https://doi.org/10.1109/IWIS57888.2022.9975131)
+- Yrjänä, V. and Bobacka, J., 2024, Electrochim. Acta, 504, 144951 (https://doi.org/10.1016/j.electacta.2024.144951)
.. raw:: latex
diff --git a/docs/source/guide_zhit.rst b/docs/source/guide_zhit.rst
index 9cdc03e..4456f1f 100644
--- a/docs/source/guide_zhit.rst
+++ b/docs/source/guide_zhit.rst
@@ -3,13 +3,13 @@
Z-HIT analysis
==============
-The `Z-HIT algorithm `_ (Ehm et al., 2000) reconstructs the modulus data of an impedance spectrum based on the phase data of that impedance spectrum.
-This algorithm can be used to, e.g., validate impedance spectra.
+The `Z-HIT algorithm `_ (Ehm et al., 2000) reconstructs the modulus data of an immittance spectrum based on the phase data of that immittance spectrum.
+This algorithm can be used to help validate immittance spectra.
Drifting at low frequencies and mutual induction at high frequencies may be detectable based on the results of the algorithm.
-The algorith is based on the following steps:
+The algorithm is based on the following steps:
-- Smoothing and interpolation of the phase data using, e.g., `LOWESS `_ and an `Akima spline `_, respectively.
+- Smoothing and interpolation of the phase data using, e.g., a `Savitzky-Golay filter `_ and an `Akima spline `_, respectively.
- Approximation of the modulus data according to
:math:`\ln{|Z(\omega_O)|} \approx \frac{2}{\pi} \int_{\omega_S}^{\omega_O} \varphi(\omega) \,{\rm d}\ln{\omega} + \gamma \frac{{\rm d}\varphi(\omega_O)}{{\rm d}\ln{\omega}} + C`
@@ -17,38 +17,44 @@ The algorith is based on the following steps:
where :math:`\omega_S` is the starting frequency, :math:`\omega_O` is the frequency of interest, :math:`\varphi` is the interpolated phase data, :math:`\gamma = \frac{\pi}{6}`, and :math:`C` is a constant.
- The constant :math:`C` is determined by fitting the approximated modulus data to a portion of the experimental modulus data.
- The frequency range from 1 Hz to 1000 Hz is typically less affected by drift or mutual induction.
+ The frequency range from 1 Hz to 1000 Hz is a good starting point since it is typically less affected by drift and/or mutual induction.
.. note::
- The reconstruction of the modulus data is not perfect and there can be minor deviations even when analyzing ideal data.
+ The reconstruction of the modulus data is not likely to be perfect and there can be deviations even when analyzing ideal data.
+How to use
+----------
+
.. doctest::
- >>> from pyimpspec import ZHITResult, perform_zhit
- >>> from pyimpspec.mock_data import DRIFTING_RANDLES
+ >>> from pyimpspec import DataSet, ZHITResult, generate_mock_data, perform_zhit
>>>
- >>> zhit: ZHITResult = perform_zhit(DRIFTING_RANDLES)
+ >>> data: DataSet = generate_mock_data("CIRCUIT_2_INVALID", noise=5e-2, seed=42)[0]
+ >>> zhit: ZHITResult = perform_zhit(data)
Below is an example where simplified Randles circuits with or without drifting have been plotted as "Drifting" and "Valid", respectively.
-The reconstructed impedance spectrum ("Z-HIT") has also been plotted and it is a close match to the impedance spectrum of the circuit without drift.
+The impedance spectrum ("Z-HIT") reconstructed from the phase data of the drifting spectrum has also been plotted and it is a close match to the impedance spectrum without drift.
.. plot::
- from pyimpspec import perform_zhit
+ from pyimpspec import perform_zhit, generate_mock_data
from pyimpspec import mpl
- from pyimpspec.mock_data import DRIFTING_RANDLES, VALID_RANDLES
- zhit = perform_zhit(DRIFTING_RANDLES)
+
+ valid = generate_mock_data("CIRCUIT_2", noise=5e-2, seed=42)[0]
+ invalid = generate_mock_data("CIRCUIT_2_INVALID", noise=5e-2, seed=42)[0]
+ zhit = perform_zhit(invalid)
+
figure, axes = mpl.plot_bode(
- VALID_RANDLES,
+ valid,
legend=False,
colors={"magnitude": "black", "phase": "black"},
markers={"magnitude": "o", "phase": "s"},
)
mpl.plot_bode(
- DRIFTING_RANDLES,
+ invalid,
legend=False,
colors={"magnitude": "black", "phase": "black"},
markers={"magnitude": "x", "phase": "+"},
@@ -62,16 +68,23 @@ The reconstructed impedance spectrum ("Z-HIT") has also been plotted and it is a
figure=figure,
axes=axes,
)
+
lines = []
labels = []
for ax in axes:
- lin, lab = ax.get_legend_handles_labels()
- lines.extend(lin)
- labels.extend(lab)
+ li, la = ax.get_legend_handles_labels()
+ lines.extend(li)
+ labels.extend(la)
+
axes[1].legend(lines, labels, loc=(0.03, 0.13))
figure.tight_layout()
+.. note::
+
+ Pyimpspec's implementation of the algorithm also supports operating on the admittance representation of the immittance data, which can be done by setting ``admittance=True`` when calling |perform_zhit|.
+
+
References:
- Ehm, W., Göhr, H., Kaus, R., Röseler, B., and Schiller, C.A., 2000, Acta Chimica Hungarica, 137 (2-3), 145-157.
diff --git a/docs/source/substitutions.rst b/docs/source/substitutions.rst
index 022f24d..35e4db2 100644
--- a/docs/source/substitutions.rst
+++ b/docs/source/substitutions.rst
@@ -1,8 +1,21 @@
+.. |alpha| replace:: :math:`\alpha`
+.. |beta| replace:: :math:`\beta`
.. |mu| replace:: :math:`\mu`
+.. |mu crit| replace:: :math:`\mu_{\rm crit}`
+.. |kappa| replace:: :math:`\kappa`
.. |lambda| replace:: :math:`\lambda`
.. |chi-squared| replace:: :math:`\chi^2`
-.. |pseudo chi-squared| replace:: :math:`\chi^2_{ps.}`
+.. |pseudo chi-squared| replace:: :math:`\chi^2_{\rm ps}`
+.. |log pseudo chi-squared| replace:: :math:`\log{\chi^2_{\rm ps}}`
+.. |N_tau| replace:: :math:`N_\tau`
+.. |N_tauopt| replace:: :math:`N_{\tau\rm,opt}`
+.. |N_taumin| replace:: :math:`N_{\tau\rm,min}`
+.. |N_taumax| replace:: :math:`N_{\tau\rm,max}`
+.. |F_ext| replace:: :math:`F_{\rm ext}`
+.. |log F_ext| replace:: :math:`\log{F_{\rm ext}}`
.. |ohm| replace:: :math:`\Omega`
+.. |log sum abs tau R| replace:: :math:`\log{\Sigma_{k=1}^{N_\tau} |\tau_k / R_k|}`
+.. |log sum abs tau C| replace:: :math:`\log{\Sigma_{k=1}^{N_\tau} |\tau_k / C_k|}`
.. classes
|| replace:: :class:`~pyimpspec.`
@@ -25,7 +38,7 @@
.. |SubcircuitDefinition| replace:: :class:`~pyimpspec.SubcircuitDefinition`
.. |TRNNLSResult| replace:: :class:`~pyimpspec.TRNNLSResult`
.. |TRRBFResult| replace:: :class:`~pyimpspec.TRRBFResult`
-.. |TestResult| replace:: :class:`~pyimpspec.TestResult`
+.. |KramersKronigResult| replace:: :class:`~pyimpspec.KramersKronigResult`
.. |TransmissionLineModel| replace:: :class:`~pyimpspec.circuit.elements.TransmissionLineModel`
.. type hints
@@ -57,17 +70,24 @@
.. functions
|| replace:: :func:`~pyimpspec.`
+.. |calculate_drt| replace:: :func:`~pyimpspec.calculate_drt`
.. |dataframe_to_data_sets| replace:: :func:`~pyimpspec.dataframe_to_data_sets`
+.. |fit_circuit| replace:: :func:`~pyimpspec.fit_circuit`
.. |get_default_num_procs| replace:: :func:`~pyimpspec.get_default_num_procs`
-.. |set_default_num_procs| replace:: :func:`~pyimpspec.set_default_num_procs`
+.. |mu.calculate_score| replace:: :func:`~pyimpspec.analysis.kramers_kronig.algorithms.mu_criterion.calculate_score`
.. |parse_cdc| replace:: :func:`~pyimpspec.parse_cdc`
.. |parse_data| replace:: :func:`~pyimpspec.parse_data`
-.. |perform_exploratory_tests| replace:: :func:`~pyimpspec.perform_exploratory_tests`
-.. |perform_test| replace:: :func:`~pyimpspec.perform_test`
-.. |fit_circuit| replace:: :func:`~pyimpspec.fit_circuit`
+.. |perform_kramers_kronig_test| replace:: :func:`~pyimpspec.perform_kramers_kronig_test`
+.. |perform_exploratory_kramers_kronig_tests| replace:: :func:`~pyimpspec.perform_exploratory_kramers_kronig_tests`
+.. |perform_zhit| replace:: :func:`~pyimpspec.perform_zhit`
.. |plot_circuit| replace:: :func:`~pyimpspec.plot.mpl.plot_circuit`
-.. |simulate_spectrum| replace:: :func:`~pyimpspec.simulate_spectrum`
.. |register_element| replace:: :func:`~pyimpspec.register_element`
+.. |set_default_num_procs| replace:: :func:`~pyimpspec.set_default_num_procs`
+.. |simulate_spectrum| replace:: :func:`~pyimpspec.simulate_spectrum`
+.. |suggest_num_RC_limits| replace:: :func:`~pyimpspec.analysis.kramers_kronig.suggest_num_RC_limits`
+.. |suggest_num_RC| replace:: :func:`~pyimpspec.analysis.kramers_kronig.suggest_num_RC`
+.. |suggest_representation| replace:: :func:`~pyimpspec.analysis.kramers_kronig.suggest_representation`
+.. |evaluate_log_F_ext| replace:: :func:`~pyimpspec.analysis.kramers_kronig.evaluate_log_F_ext`
.. links
.. _circuitikz: https://github.com/circuitikz/circuitikz
@@ -77,11 +97,14 @@
.. _lin-kk tool: https://www.iam.kit.edu/et/english/Lin-KK.php
.. _lmfit.minimize: https://lmfit.github.io/lmfit-py/fitting.html#lmfit.minimizer.minimize
.. _matplotlib: https://matplotlib.org
-.. _pandas.dataframe: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html#pandas.DataFrame
+.. _numpy.inf: https://numpy.org/doc/stable/reference/constants.html#numpy.inf
+.. _numpy.nan: https://numpy.org/doc/stable/reference/constants.html#numpy.nan
.. _pandas.dataframe.to_latex: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_latex.html#pandas.DataFrame.to_latex
.. _pandas.dataframe.to_markdown: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_markdown.html#pandas.DataFrame.to_markdown
+.. _pandas.dataframe: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html#pandas.DataFrame
.. _randles: https://en.wikipedia.org/wiki/Randles_circuit
.. _schemdraw: https://schemdraw.readthedocs.io/en/latest/
+.. _schemdraw.drawing: https://schemdraw.readthedocs.io/en/latest/classes/drawing.html#schemdraw.Drawing
.. _scipy.signal.savgol_filter: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.savgol_filter.html
.. _statsmodels.nonparametric.smoothers_lowess.lowess: https://www.statsmodels.org/dev/generated/statsmodels.nonparametric.smoothers_lowess.lowess.html
.. _sympy: https://www.sympy.org/en/index.html
diff --git a/post-build.py b/post-build.py
index 8e50612..d77ac43 100644
--- a/post-build.py
+++ b/post-build.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,17 +19,7 @@
from dataclasses import dataclass
from datetime import date
-from os import (
- makedirs,
- remove,
- walk,
-)
-from os.path import (
- exists,
- isdir,
- join,
- splitext,
-)
+from pathlib import Path
from re import search
from shutil import (
copy,
@@ -45,18 +35,27 @@
)
-def copy_html(src: str, dst: str):
- if exists(dst):
+PARENT_DIRECTORY: Path = Path(__file__).parent
+
+
+def copy_html(src: Path, dst: Path):
+ if dst.is_dir():
rmtree(dst)
- files: List[str] = []
- for _, _, files in walk(src):
- break
+
+ files: List[Path] = []
+
+ path: Path
+ for path in src.glob("*"):
+ if path.is_file():
+ files.append(path)
+
assert len(files) > 0
+
files = [
- _
- for _ in files
- if not _.startswith(".")
- and splitext(_)[1]
+ path
+ for path in files
+    if not path.name.startswith(".")
+ and path.suffix.lower()
in (
".html",
".js",
@@ -65,26 +64,33 @@ def copy_html(src: str, dst: str):
".svg",
)
]
- dirs: List[str] = ["_images", "_static", "_sources"]
- if not isdir(dst):
- makedirs(dst)
- name: str
- for name in files:
- copy(join(src, name), join(dst, name))
- for name in dirs:
- copytree(join(src, name), join(dst, name))
+
+ dirs: List[Path] = list(map(Path, ("_images", "_static", "_sources")))
+
+ if not dst.is_dir():
+ dst.mkdir(parents=True)
+
+ for path in files:
+ copy(src.joinpath(path.name), dst.joinpath(path.name))
+
+ for path in dirs:
+ copytree(src.joinpath(path.name), dst.joinpath(path.name))
-def copy_pdf(src: str, dst: str, name: str, version_path: str):
+def copy_pdf(src: Path, dst: Path, name: str, version_path: Path):
version: str = ""
+
fp: IO
with open(version_path, "r") as fp:
version = fp.read().strip().replace(".", "-")
+
assert version != ""
- ext: str = splitext(src)[1]
- dst = join(dst, f"{name}-{version}{ext}")
- if exists(dst):
- remove(dst)
+
+ ext: str = src.suffix
+ dst = dst.joinpath(f"{name}-{version}{ext}")
+ if dst.is_file():
+ dst.unlink()
+
copy(src, dst)
@@ -98,7 +104,7 @@ class Version:
day: int
-def validate_changelog(path: str):
+def validate_changelog(path: Path):
def parse_version(match: Match) -> Version:
return Version(
major=int(match.group("major")),
@@ -110,7 +116,8 @@ def parse_version(match: Match) -> Version:
)
def validate_date(
- version: Version, comparison: Union[Version, date] = date.today()
+ version: Version,
+ comparison: Union[Version, date] = date.today(),
):
assert version.year <= comparison.year, (version, comparison)
assert 1 <= version.month <= 12, version
@@ -122,24 +129,32 @@ def validate_date(
def validate_version(earlier: Version, current: Version):
assert earlier.major <= current.major, (earlier, current)
+
if earlier.major < current.major:
return
+
assert earlier.minor <= current.minor, (earlier, current)
+
if earlier.minor < current.minor:
return
+
assert earlier.patch < current.patch, (earlier, current)
- assert exists(path), path
+ assert path.is_file(), path
+
fp: IO
with open(path, "r") as fp:
lines: List[str] = fp.readlines()
+
pattern: str = (
        r"# (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"
        r" \((?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})\)"
)
+
try:
match: Optional[Match] = search(pattern, lines.pop(0))
assert match is not None, pattern
+
versions: List[Version] = list(
map(
parse_version,
@@ -151,25 +166,28 @@ def validate_version(earlier: Version, current: Version):
],
)
)
+
list(map(validate_date, versions))
+
while len(versions) > 1:
current: Version = versions.pop(0)
earlier = versions[0]
validate_date(earlier, current)
validate_version(earlier, current)
+
except AssertionError:
raise Exception("The changelog needs to be updated!")
if __name__ == "__main__":
copy_html(
- src="./docs/build/html",
- dst="./dist/html",
+ src=PARENT_DIRECTORY.joinpath("docs", "build", "html"),
+ dst=PARENT_DIRECTORY.joinpath("dist", "html"),
)
copy_pdf(
- src="./docs/build/latex/latex/pyimpspec.pdf",
- dst="./dist",
+ src=PARENT_DIRECTORY.joinpath("docs", "build", "latex", "latex", "pyimpspec.pdf"),
+ dst=PARENT_DIRECTORY.joinpath("dist"),
name="pyimpspec",
- version_path="./version.txt",
+ version_path=PARENT_DIRECTORY.joinpath("version.txt"),
)
- validate_changelog("./CHANGELOG.md")
+ validate_changelog(PARENT_DIRECTORY.joinpath("CHANGELOG.md"))
diff --git a/pre-build.py b/pre-build.py
index 9328351..ebabfc8 100644
--- a/pre-build.py
+++ b/pre-build.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -17,68 +17,71 @@
# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
# the LICENSES folder.
+from pathlib import Path
+from shutil import rmtree
from typing import (
- List,
IO,
+ List,
)
-from os import (
- makedirs,
- walk,
-)
-from os.path import (
- exists,
- isfile,
- isdir,
- join,
-)
-from shutil import rmtree
-def update_file(src: str, dst: str):
- if not isfile(src):
+PARENT_DIRECTORY: Path = Path(__file__).parent
+
+
+def update_file(src: Path, dst: Path):
+ if not src.is_file():
return
+
src_contents: str = ""
+
fp: IO
with open(src, "r") as fp:
src_contents = fp.read()
- if isfile(dst):
+
+ if dst.is_file():
with open(dst, "r") as fp:
if fp.read() == src_contents:
return
+
with open(dst, "w") as fp:
fp.write(src_contents)
-def copy_additional_files(files):
- src_dir: str = "."
- dst_dir: str = join(".", "src", "pyimpspec")
- licenses_dir: str = join(dst_dir, "LICENSES")
- if not isdir(licenses_dir):
- makedirs(licenses_dir)
- path: str
+def copy_additional_files(files: List[Path]):
+ src_dir: Path = PARENT_DIRECTORY
+ dst_dir: Path = src_dir.joinpath("src", "pyimpspec")
+ licenses_dir: Path = dst_dir.joinpath("LICENSES")
+ if not licenses_dir.is_dir():
+ licenses_dir.mkdir(parents=True)
+
+ path: Path
for path in files:
- update_file(join(src_dir, path), join(dst_dir, path))
+ update_file(src_dir.joinpath(path), dst_dir.joinpath(path))
if __name__ == "__main__":
- data_files: List[str] = [
+ data_files: List[Path] = list(map(Path, (
"CHANGELOG.md",
"CONTRIBUTORS",
"COPYRIGHT",
"LICENSE",
"README.md",
- ]
- files: List[str]
- for _, _, files in walk("LICENSES"):
- data_files.extend(map(lambda _: join("LICENSES", _), files))
- break
- assert all(map(lambda _: isfile(_), data_files))
+ )))
+
+ path: Path
+ for path in PARENT_DIRECTORY.joinpath("LICENSES").glob("*"):
+ data_files.append(path)
+
+ assert all(map(lambda path: path.is_file(), data_files))
+
copy_additional_files(data_files)
+
# Remove old dist files
- dist_output: str = "./dist"
- if exists(dist_output):
+ dist_output: Path = PARENT_DIRECTORY.joinpath("dist")
+ if dist_output.is_dir():
rmtree(dist_output)
+
# Remove old documentation files to force a rebuild
- docs_output: str = "./docs/build"
- if exists(docs_output):
+ docs_output: Path = PARENT_DIRECTORY.joinpath("docs", "build")
+ if docs_output.is_dir():
rmtree(docs_output)
diff --git a/requirements.txt b/requirements.txt
index cabccb5..97e7dba 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,14 +1,14 @@
Jinja2~=3.1
-lmfit~=1.2
-matplotlib~=3.8
+lmfit~=1.3
+matplotlib~=3.9
numdifftools~=0.9
-numpy~=1.26
+numpy~=2.0
odfpy~=1.4
openpyxl~=3.1
pandas~=2.2
-schemdraw~=0.18
-scipy~=1.12
+schemdraw~=0.19
+scipy~=1.14
statsmodels~=0.14
-sympy~=1.12
+sympy~=1.13
tabulate~=0.9
-xdg~=6.0
+xdg~=6.0
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 037c1f6..ba1fe55 100644
--- a/setup.py
+++ b/setup.py
@@ -9,17 +9,17 @@
dependencies = [
"Jinja2~=3.1", # Needed when turning pandas.DataFrames instances into, e.g., LaTeX strings.
- "lmfit~=1.2", # Needed for performing non-linear fitting.
- "matplotlib~=3.8", # Needed for the plotting module.
+ "lmfit~=1.3", # Needed for performing non-linear fitting.
+ "matplotlib~=3.9", # Needed for the plotting module.
"numdifftools~=0.9", # Needed for estimating uncertainties during circuit fitting
- "numpy~=1.26",
+ "numpy~=2.0",
"odfpy~=1.4", # Needed by pandas for parsing OpenDocument spreadsheet formats.
"openpyxl~=3.1", # Needed by pandas for parsing newer Excel files (.xlsx).
"pandas~=2.2", # Needed for dealing with various file formats.
- "schemdraw~=0.18", # Needed to draw circuit diagrams
- "scipy~=1.12", # Used in the DRT calculations
+ "schemdraw~=0.19", # Needed to draw circuit diagrams
+ "scipy~=1.14", # Used in the DRT calculations
"statsmodels~=0.14", # Used for smoothing (LOWESS) in Z-HIT
- "sympy~=1.12", # Used to generate expressions for circuits
+ "sympy~=1.13", # Used to generate expressions for circuits
"tabulate~=0.9", # Required by pandas to generate Markdown tables.
# TODO: The 'xdg' package has been renamed to 'xdg-base-dirs' and changed
# to only support Python >=3.10. Update at some point in the future.
@@ -28,36 +28,40 @@
dev_dependencies = [
"build~=1.2",
- "flake8~=7.0",
- "setuptools~=70.0",
- "sphinx~=7.3",
+ "flake8~=7.1",
+ "setuptools~=74.0",
+ "sphinx~=8.0",
"sphinx-rtd-theme~=2.0",
]
optional_dependencies = {
"cvxopt": "cvxopt~=1.3", # Used in the DRT calculations (TR-RBF method)
"kvxopt": "kvxopt~=1.3", # Fork of cvxopt that may provide wheels for additional platforms
- "cvxpy": "cvxpy~=1.4", # Used in the DRT calculations (TR-RBF method)
"dev": dev_dependencies,
}
-version = "4.1.1"
+version = "5.0.0"
if __name__ == "__main__":
with open("requirements.txt", "w") as fp:
fp.write("\n".join(dependencies))
+
with open("dev-requirements.txt", "w") as fp:
fp.write("\n".join(dev_dependencies))
+
with open("version.txt", "w") as fp:
fp.write(version)
+
assert version.strip != ""
copyright_notice = ""
if exists("COPYRIGHT"):
with open("COPYRIGHT") as fp:
copyright_notice = fp.read().strip()
assert copyright_notice != ""
+
with open(join("src", "pyimpspec", "version.py"), "w") as fp:
fp.write(f'{copyright_notice}\n\nPACKAGE_VERSION: str = "{version}"')
+
setup(
name="pyimpspec",
version=version,
@@ -78,14 +82,13 @@
entry_points=entry_points,
install_requires=dependencies,
extras_require=optional_dependencies,
- python_requires=">=3.9",
+ python_requires=">=3.10",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
- "Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
diff --git a/src/pyimpspec/__init__.py b/src/pyimpspec/__init__.py
index 5173c5f..78b7f82 100644
--- a/src/pyimpspec/__init__.py
+++ b/src/pyimpspec/__init__.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -50,16 +50,8 @@
_set_default_num_procs as set_default_num_procs,
)
from pyimpspec.analysis.drt import (
- BHTResult,
DRTResult,
- MRQFitResult,
- TRNNLSResult,
- TRRBFResult,
calculate_drt,
- calculate_drt_bht,
- calculate_drt_mrq_fit,
- calculate_drt_tr_nnls,
- calculate_drt_tr_rbf,
)
from pyimpspec.analysis.fitting import (
FitResult,
@@ -67,9 +59,9 @@
fit_circuit,
)
from pyimpspec.analysis.kramers_kronig import (
- TestResult,
- perform_exploratory_tests,
- perform_test,
+ KramersKronigResult,
+ perform_kramers_kronig_test,
+ perform_exploratory_kramers_kronig_tests,
)
from pyimpspec.analysis.zhit import (
ZHITResult,
@@ -77,3 +69,8 @@
)
from pyimpspec.typing import *
import pyimpspec.plot.mpl as mpl
+from pyimpspec.mock_data import (
+ generate_mock_circuits,
+ generate_mock_data,
+)
+from .version import PACKAGE_VERSION
diff --git a/src/pyimpspec/__main__.py b/src/pyimpspec/__main__.py
index d95775c..ff1f28c 100644
--- a/src/pyimpspec/__main__.py
+++ b/src/pyimpspec/__main__.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
diff --git a/src/pyimpspec/analysis/__init__.py b/src/pyimpspec/analysis/__init__.py
index f3d05aa..18b3236 100644
--- a/src/pyimpspec/analysis/__init__.py
+++ b/src/pyimpspec/analysis/__init__.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -24,9 +24,9 @@
fit_circuit,
)
from .kramers_kronig import (
- TestResult,
- perform_exploratory_tests,
- perform_test,
+ KramersKronigResult,
+ perform_kramers_kronig_test,
+ perform_exploratory_kramers_kronig_tests,
)
from .drt import (
BHTResult,
diff --git a/src/pyimpspec/analysis/drt/__init__.py b/src/pyimpspec/analysis/drt/__init__.py
index 0557b64..f3777b6 100644
--- a/src/pyimpspec/analysis/drt/__init__.py
+++ b/src/pyimpspec/analysis/drt/__init__.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,9 +20,7 @@
from typing import List
from pyimpspec.data import DataSet
from pyimpspec.exceptions import DRTError
-from .result import (
- DRTResult
-)
+from .result import DRTResult
from .tr_nnls import (
TRNNLSResult,
calculate_drt_tr_nnls,
@@ -73,21 +71,22 @@ def calculate_drt(
-------
DRTResult
"""
- assert (
- hasattr(data, "get_frequencies")
- and callable(data.get_frequencies)
- and hasattr(data, "get_impedances")
- and callable(data.get_impedances)
- ), "Invalid data object!"
- assert isinstance(method, str), method
- if method not in _METHODS:
- raise NotImplementedError(
+ if not isinstance(method, str):
+ raise TypeError(f"Expected a string instead of {method=}")
+ elif method not in _METHODS:
+ raise ValueError(
f"Unsupported method: '{method}'! Valid value include: '"
+ "', '".join(_METHODS)
+ "'."
)
- if method == "tr-nnls" and kwargs.get("mode") is not None and kwargs["mode"] == "complex":
+
+ if (
+ method == "tr-nnls"
+ and kwargs.get("mode") is not None
+ and kwargs["mode"] == "complex"
+ ):
kwargs["mode"] = "real"
+
return {
"bht": calculate_drt_bht,
"mrq-fit": calculate_drt_mrq_fit,
diff --git a/src/pyimpspec/analysis/drt/bht.py b/src/pyimpspec/analysis/drt/bht.py
index c0f1319..1229c96 100644
--- a/src/pyimpspec/analysis/drt/bht.py
+++ b/src/pyimpspec/analysis/drt/bht.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -18,35 +18,23 @@
# the LICENSES folder.
# This module implements the Bayesian Hilbert transform method
-# 10.1016/j.electacta.2020.136864
+# - 10.1016/j.electacta.2020.136864
# Based on code from https://github.com/ciuccislab/pyDRTtools.
-# pyDRTtools commit: 65ea54d9332a0c6594de852f0242a88e20ec4427
+# pyDRTtools commit: 3694b9b4cef9b29d623bef7300280810ec351d46
from contextlib import redirect_stdout
from dataclasses import dataclass
from multiprocessing import Pool
from os import devnull
-from typing import (
- Callable,
- Dict,
- IO,
- List,
- Optional,
- Tuple,
- Union,
-)
+from sys import version_info as _python_version_info
from numpy import (
array,
diag,
- empty,
empty_like,
exp,
eye,
float64,
- floating,
full,
- integer,
- issubdtype,
log as ln,
log10 as log,
logical_and,
@@ -60,7 +48,7 @@
)
from numpy.linalg import (
cholesky,
- inv as invert,
+ inv,
norm,
solve as solve_linalg,
)
@@ -84,7 +72,6 @@
from pyimpspec.typing import (
ComplexImpedance,
ComplexImpedances,
- ComplexResiduals,
Frequencies,
Gamma,
Gammas,
@@ -92,6 +79,17 @@
TimeConstant,
TimeConstants,
)
+from pyimpspec.typing.helpers import (
+ Callable,
+ Dict,
+ IO,
+ List,
+ Optional,
+ Tuple,
+ Union,
+ _is_integer,
+ _is_floating,
+)
@dataclass(frozen=True)
@@ -171,8 +169,16 @@ def to_peaks_dataframe(
"tau, imag. (s)",
"gamma, imag. (ohm)",
]
- assert isinstance(columns, list), columns
- assert len(columns) == 4
+ elif not isinstance(columns, list):
+ raise TypeError(f"Expected a list of strings instead of {columns=}")
+ elif len(columns) != 4:
+ raise ValueError(f"Expected a list with 4 items instead of {len(columns)=}")
+ elif not all(map(lambda s: isinstance(s, str), columns)):
+ raise TypeError(f"Expected a list of strings instead of {columns=}")
+ elif len(set(columns)) != 4:
+ raise ValueError(
+ f"Expected a list of 4 unique strings instead of {columns=}"
+ )
def pad(
tau: TimeConstants,
@@ -191,6 +197,7 @@ def pad(
indices_re: Indices = self._get_peak_indices(threshold, self.real_gammas)
indices_im: Indices = self._get_peak_indices(threshold, self.imaginary_gammas)
width: int = max(indices_re.size, indices_im.size)
+
tau_re: TimeConstants
gamma_re: Gammas
tau_re, gamma_re = pad(
@@ -198,6 +205,7 @@ def pad(
self.real_gammas[indices_re],
width,
)
+
tau_im: TimeConstants
gamma_im: Gammas
tau_im, gamma_im = pad(
@@ -205,6 +213,7 @@ def pad(
self.imaginary_gammas[indices_im],
width,
)
+
return DataFrame.from_dict(
{
columns[0]: tau_re,
@@ -222,6 +231,7 @@ def to_statistics_dataframe(
statistics: Dict[str, Union[int, float, str]] = {
"Log pseudo chi-squared": log(self.pseudo_chisqr),
}
+
return DataFrame.from_dict(
{
"Label": list(statistics.keys()),
@@ -248,6 +258,7 @@ def get_peaks(
"""
indices_re: Indices = self._get_peak_indices(threshold, self.real_gammas)
indices_im: Indices = self._get_peak_indices(threshold, self.imaginary_gammas)
+
return (
self.time_constants[indices_re],
self.real_gammas[indices_re],
@@ -295,8 +306,17 @@ def to_scores_dataframe(
"Real (%)",
"Imag. (%)",
]
- assert isinstance(columns, list), columns
- assert len(columns) == 3
+ elif not isinstance(columns, list):
+ raise TypeError(f"Expected a list of strings instead of {columns=}")
+ elif len(columns) != 3:
+ raise ValueError(f"Expected a list with 3 items instead of {len(columns)=}")
+ elif not all(map(lambda s: isinstance(s, str), columns)):
+ raise TypeError(f"Expected a list of strings instead of {columns=}")
+ elif len(set(columns)) != 3:
+ raise ValueError(
+ f"Expected a list of 3 unique strings instead of {columns=}"
+ )
+
if rows is None:
rows = [
"Mean",
@@ -306,8 +326,15 @@ def to_scores_dataframe(
"Hellinger distance",
"Jensen-Shannon distance",
]
- assert isinstance(rows, list), rows
- assert len(rows) == 6
+ elif not isinstance(rows, list):
+ raise TypeError(f"Expected a list of strings instead of {rows=}")
+ elif len(rows) != 6:
+ raise ValueError(f"Expected a list with 6 items instead of {len(rows)=}")
+ elif not all(map(lambda s: isinstance(s, str), rows)):
+ raise TypeError(f"Expected a list of strings instead of {rows=}")
+ elif len(set(rows)) != 6:
+ raise ValueError(f"Expected a list of 6 unique strings instead of {rows=}")
+
return DataFrame.from_dict(
{
columns[0]: rows,
@@ -332,13 +359,16 @@ def to_scores_dataframe(
def _compute_res_scores(
- res: NDArray[float64], band: NDArray[float64]
+ res: NDArray[float64],
+ band: NDArray[float64],
) -> NDArray[float64]:
# Count the points fallen inside the 1, 2, and 3 sigma credible bands
count: NDArray[float64] = zeros(3, dtype=float64)
+
i: int
for i in range(3):
count[i] = array_sum(logical_and(res < (i + 1) * band, res > -(i + 1) * band))
+
return count / len(res)
@@ -351,8 +381,10 @@ def _compute_SHD(
# Squared Hellinger distance
sigma_P: NDArray[float64] = sqrt(diag(Sigma_P))
sigma_Q: NDArray[float64] = sqrt(diag(Sigma_Q))
+
sum_cov: NDArray[float64] = sigma_P**2 + sigma_Q**2
prod_cov: NDArray[float64] = sigma_P * sigma_Q
+
return 1.0 - sqrt(2.0 * prod_cov / sum_cov) * exp(
-0.25 * (mu_P - mu_Q) ** 2 / sum_cov
)
@@ -369,21 +401,26 @@ def _compute_JSD(
from scipy.stats import multivariate_normal
JSD: NDArray[float64] = empty_like(mu_P, dtype=float64)
+
i: int
for i in range(mu_P.size):
RV_p = multivariate_normal(mean=mu_P[i], cov=Sigma_P[i, i])
RV_q = multivariate_normal(mean=mu_Q[i], cov=Sigma_Q[i, i])
+
x: NDArray[float64] = RV_p.rvs(num_samples)
p_x: NDArray[float64] = RV_p.pdf(x)
q_x: NDArray[float64] = RV_q.pdf(x)
m_x: NDArray[float64] = (p_x + q_x) / 2.0
+
y: NDArray[float64] = RV_q.rvs(num_samples)
p_y: NDArray[float64] = RV_p.pdf(y)
q_y: NDArray[float64] = RV_q.pdf(y)
m_y: NDArray[float64] = (p_y + q_y) / 2.0
+
dKL_pm: NDArray[float64] = ln(p_x / m_x).mean()
dKL_qm: NDArray[float64] = ln(q_y / m_y).mean()
JSD[i] = 0.5 * (dKL_pm + dKL_qm)
+
return JSD
@@ -399,33 +436,41 @@ def _NMLL_fct(
sigma_beta: NDArray[float64]
sigma_lambda: NDArray[float64]
sigma_n, sigma_beta, sigma_lambda = theta
+
W: NDArray[float64] = (
1 / (sigma_beta**2) * eye(num_taus + 1, dtype=float64)
+ 1 / (sigma_lambda**2) * L.T @ L
)
+
# W = 0.5 * (W.T + W)
K_agm: NDArray[float64] = 1 / (sigma_n**2) * (A.T @ A) + W
+
# K_agm = 0.5 * (K_agm.T + K_agm)
L_W: NDArray[float64] = cholesky(W)
L_agm: NDArray[float64] = cholesky(K_agm)
+
# Compute mu_x
u: NDArray[float64] = solve_linalg(L_agm.T, solve_linalg(L_agm, A.T @ Z))
mu_x: NDArray[float64] = 1 / (sigma_n**2) * u
+
# Compute loss
E_mu_x: NDArray[float64] = 0.5 / (sigma_n**2) * norm(A @ mu_x - Z) ** 2 + 0.5 * (
mu_x.T @ (W @ mu_x)
)
+
val_1: NDArray[float64] = array_sum(ln(diag(L_W)))
val_2: NDArray[float64] = -array_sum(ln(diag(L_agm)))
val_3: NDArray[float64] = -num_freqs / 2.0 * ln(sigma_n**2)
val_4: NDArray[float64] = -E_mu_x
val_5: float = -num_freqs / 2 * ln(2 * pi)
+
return -(val_1 + val_2 + val_3 + val_4 + val_5)
def _compute_A_re(w: NDArray[float64], tau: NDArray[float64]) -> NDArray[float64]:
num_freqs: int = w.shape[0]
num_taus: int = tau.shape[0]
+
A_re: NDArray[float64] = zeros(
(
num_freqs,
@@ -434,6 +479,7 @@ def _compute_A_re(w: NDArray[float64], tau: NDArray[float64]) -> NDArray[float64
dtype=float64,
)
A_re[:, 0] = 1.0
+
i: int
j: int
for i in range(0, num_freqs):
@@ -446,6 +492,7 @@ def _compute_A_re(w: NDArray[float64], tau: NDArray[float64]) -> NDArray[float64
/ (tau[j] if j == 0 else tau[j - 1])
)
)
+
return A_re
@@ -456,6 +503,7 @@ def _compute_A_H_re(w: NDArray[float64], tau: NDArray[float64]) -> NDArray[float
def _compute_A_im(w: NDArray[float64], tau: NDArray[float64]) -> NDArray[float64]:
num_freqs: int = w.shape[0]
num_taus: int = tau.shape[0]
+
A_im: NDArray[float64] = zeros(
(
num_freqs,
@@ -464,6 +512,7 @@ def _compute_A_im(w: NDArray[float64], tau: NDArray[float64]) -> NDArray[float64
dtype=float64,
)
A_im[:, 0] = w
+
i: int
j: int
for i in range(0, num_freqs):
@@ -477,6 +526,7 @@ def _compute_A_im(w: NDArray[float64], tau: NDArray[float64]) -> NDArray[float64
/ (tau[j] if j == 0 else tau[j - 1])
)
)
+
return A_im
@@ -493,52 +543,49 @@ def _compute_L(tau: NDArray[float64], derivative_order: int) -> NDArray[float64]
),
dtype=float64,
)
+
i: int
delta_loc: float
if derivative_order == 1:
for i in range(0, num_taus - 2):
delta_loc = ln(tau[i + 1] / tau[i])
factors: NDArray[float64] = array([1.0, -2.0, 1.0], dtype=float64)
+
if i == 0 or i == num_taus - 3:
factors *= 2
+
L[i, i + 1] = factors[0] / (delta_loc**2)
L[i, i + 2] = factors[1] / (delta_loc**2)
L[i, i + 3] = factors[2] / (delta_loc**2)
+
elif derivative_order == 2:
for i in range(0, num_taus - 2):
delta_loc = ln(tau[i + 1] / tau[i])
+
if i == 0:
L[i, i + 1] = -3.0 / (2 * delta_loc)
L[i, i + 2] = 4.0 / (2 * delta_loc)
L[i, i + 3] = -1.0 / (2 * delta_loc)
+
elif i == num_taus - 2:
L[i, i] = 1.0 / (2 * delta_loc)
L[i, i + 1] = -4.0 / (2 * delta_loc)
L[i, i + 2] = 3.0 / (2 * delta_loc)
+
else:
L[i, i] = 1.0 / (2 * delta_loc)
L[i, i + 2] = -1.0 / (2 * delta_loc)
else:
- raise Exception(f"Unsupported derivative order: {derivative_order}")
- return L
+ raise NotImplementedError(f"Unsupported {derivative_order=}")
-
-def _compute_mu_real_score(
- mu_Z_DRT_re: NDArray[float64],
- mu_Z_H_re: NDArray[float64],
-) -> float:
- return float(
- 1.0 - (norm(mu_Z_DRT_re - mu_Z_H_re) / (norm(mu_Z_DRT_re) + norm(mu_Z_H_re)))
- )
+ return L
-def _compute_mu_imaginary_score(
- mu_Z_DRT_im: NDArray[float64],
- mu_Z_H_im: NDArray[float64],
+def _compute_mu_score(
+ mu_Z_DRT: NDArray[float64],
+ mu_Z_H: NDArray[float64],
) -> float:
- return float(
- 1.0 - (norm(mu_Z_DRT_im - mu_Z_H_im) / (norm(mu_Z_DRT_im) + norm(mu_Z_H_im)))
- )
+ return float(1.0 - (norm(mu_Z_DRT - mu_Z_H) / (norm(mu_Z_DRT) + norm(mu_Z_H))))
def _compute_real_residual_scores(
@@ -551,6 +598,7 @@ def _compute_real_residual_scores(
) -> NDArray[float64]:
res_re: NDArray[float64] = mu_R_inf + mu_Z_H_re - Z_exp.real
band_re: NDArray[float64] = sqrt(cov_R_inf + diag(Sigma_Z_H_re) + sigma_n_im**2)
+
return _compute_res_scores(res_re, band_re)
@@ -567,6 +615,7 @@ def _compute_imaginary_residual_scores(
band_im: NDArray[float64] = sqrt(
(omega**2) * cov_L_0 + diag(Sigma_Z_H_im) + sigma_n_re**2
)
+
return _compute_res_scores(res_im, band_im)
@@ -621,12 +670,14 @@ def _calculate_scores(
# s_mu - distance between means:
mu_Z_DRT_re: NDArray[float64] = out_dict_real["mu_Z_DRT"]
mu_Z_H_re: NDArray[float64] = out_dict_imag["mu_Z_H"]
+ s_mu_re: float = _compute_mu_score(mu_Z_DRT_re, mu_Z_H_re)
+ prog.increment()
+
mu_Z_DRT_im: NDArray[float64] = out_dict_imag["mu_Z_DRT"]
mu_Z_H_im: NDArray[float64] = out_dict_real["mu_Z_H"]
- s_mu_re: float = _compute_mu_real_score(mu_Z_DRT_re, mu_Z_H_re)
- prog.increment()
- s_mu_im: float = _compute_mu_imaginary_score(mu_Z_DRT_im, mu_Z_H_im)
+ s_mu_im: float = _compute_mu_score(mu_Z_DRT_im, mu_Z_H_im)
prog.increment()
+
# s_JSD - Jensen-Shannon Distance:
# we need the means (above) and covariances (below)
# for the computation of the JSD
@@ -634,6 +685,7 @@ def _calculate_scores(
Sigma_Z_DRT_im: NDArray[float64] = out_dict_imag["Sigma_Z_DRT"]
Sigma_Z_H_re: NDArray[float64] = out_dict_imag["Sigma_Z_H"]
Sigma_Z_H_im: NDArray[float64] = out_dict_real["Sigma_Z_H"]
+
# s_res - residual score:
# real part
s_res_re: NDArray[float64] = _compute_real_residual_scores(
@@ -645,6 +697,7 @@ def _calculate_scores(
Sigma_Z_H_re,
)
prog.increment()
+
# imaginary part
s_res_im: NDArray[float64] = _compute_imaginary_residual_scores(
out_dict_imag["mu_gamma"][0],
@@ -656,6 +709,7 @@ def _calculate_scores(
Sigma_Z_H_im,
)
prog.increment()
+
s_HD_re: float = _compute_HD_score(
mu_Z_DRT_re,
Sigma_Z_DRT_re,
@@ -663,6 +717,7 @@ def _calculate_scores(
Sigma_Z_H_re,
)
prog.increment()
+
s_HD_im: float = _compute_HD_score(
mu_Z_DRT_im,
Sigma_Z_DRT_im,
@@ -670,6 +725,7 @@ def _calculate_scores(
Sigma_Z_H_im,
)
prog.increment()
+
s_JSD_re: float = _compute_JSD_score(
mu_Z_DRT_re,
Sigma_Z_DRT_re,
@@ -678,6 +734,7 @@ def _calculate_scores(
num_samples,
)
prog.increment()
+
s_JSD_im: float = _compute_JSD_score(
mu_Z_DRT_im,
Sigma_Z_DRT_im,
@@ -685,6 +742,7 @@ def _calculate_scores(
Sigma_Z_H_im,
num_samples,
)
+
return {
"hellinger_distance": complex(s_HD_re, s_HD_im),
"jensen_shannon_distance": complex(s_JSD_re, s_JSD_im),
@@ -697,31 +755,43 @@ def _calculate_scores(
def _single_hilbert_transform_estimate(
theta_0: NDArray[float64],
- Z_exp: ComplexImpedances,
+ Z_exp: NDArray[float64],
A: NDArray[float64],
A_H: NDArray[float64],
L: NDArray[float64],
num_freqs: int,
num_taus: int,
-):
+) -> dict:
+ import warnings
from scipy.optimize import (
OptimizeResult,
+ OptimizeWarning,
minimize,
)
fp: IO
with open(devnull, "w") as fp:
with redirect_stdout(fp):
- res: OptimizeResult = minimize(
- _NMLL_fct,
- squeeze(theta_0),
- args=(Z_exp, A, L, num_freqs, num_taus),
- options={"gtol": 1e-8, "disp": True},
- )
+ kw: dict
+ if _python_version_info.major == 3 and _python_version_info.minor < 11:
+ kw = {}
+ else:
+ kw = {"category": OptimizeWarning}
+
+ with warnings.catch_warnings(**kw):
+ warnings.simplefilter("ignore")
+ res: OptimizeResult = minimize(
+ _NMLL_fct,
+ squeeze(theta_0),
+ args=(Z_exp, A, L, num_freqs, num_taus),
+ options={"gtol": 1e-8, "disp": True},
+ )
+
sigma_n: float
sigma_beta: float
sigma_lambda: float
sigma_n, sigma_beta, sigma_lambda = res.x
+
# Compute the probability density functions of data regression
# $K_agm = A.T A +\lambda L.T L$
W: NDArray[float64] = (
@@ -729,27 +799,34 @@ def _single_hilbert_transform_estimate(
+ 1 / (sigma_lambda**2) * L.T @ L
)
K_agm: NDArray[float64] = 1 / (sigma_n**2) * (A.T @ A) + W
+
# Cholesky factorization
L_agm: NDArray[float64] = cholesky(K_agm)
- inv_L_agm: NDArray[float64] = invert(L_agm)
+ inv_L_agm: NDArray[float64] = inv(L_agm)
inv_K_agm: NDArray[float64] = inv_L_agm.T @ inv_L_agm
+
# Compute the gamma ~ N(mu_gamma, Sigma_gamma)
Sigma_gamma: NDArray[float64] = inv_K_agm
+ # .real is also in the original pyDRTTools code
mu_gamma: NDArray[float64] = 1 / (sigma_n**2) * (Sigma_gamma @ A.T) @ Z_exp.real
+
# Compute, from gamma, the Z ~ N(mu_Z, Sigma_Z)
mu_Z: NDArray[float64] = A @ mu_gamma
Sigma_Z: NDArray[float64] = A @ (Sigma_gamma @ A.T) + sigma_n**2 * eye(
num_freqs, dtype=float64
)
+
# Compute, from gamma, the Z_DRT ~ N(mu_Z_DRT, Sigma_Z_DRT)
A_DRT: NDArray[float64] = A[:, 1:]
mu_gamma_DRT: NDArray[float64] = mu_gamma[1:]
Sigma_gamma_DRT: NDArray[float64] = Sigma_gamma[1:, 1:]
mu_Z_DRT: NDArray[float64] = A_DRT @ mu_gamma_DRT
Sigma_Z_DRT: NDArray[float64] = A_DRT @ (Sigma_gamma_DRT @ A_DRT.T)
+
# Compute, from gamma, the Z_H_conj ~ N(mu_Z_H_conj, Sigma_Z_H_conj)
mu_Z_H: NDArray[float64] = A_H @ mu_gamma[1:]
Sigma_Z_H: NDArray[float64] = A_H @ (Sigma_gamma[1:, 1:] @ A_H.T)
+
return {
"mu_gamma": mu_gamma,
"Sigma_gamma": Sigma_gamma,
@@ -776,6 +853,7 @@ def _calculate_symmetry_score(
data_real: dict
data_imag: dict
pseudo_chisqr, theta_0, data_real, data_imag = result
+
_, gamma = _x_to_gamma(
data_real["mu_gamma"][1:],
tau_fine,
@@ -783,9 +861,11 @@ def _calculate_symmetry_score(
epsilon,
rbf_type,
)
+
min_gamma: float = abs(min(gamma))
max_gamma: float = abs(max(gamma))
score: float = 1.0 - ((max_gamma - min_gamma) / max(min_gamma, max_gamma))
+
_, gamma = _x_to_gamma(
data_imag["mu_gamma"][1:],
tau_fine,
@@ -793,15 +873,17 @@ def _calculate_symmetry_score(
epsilon,
rbf_type,
)
+
min_gamma = abs(min(gamma))
max_gamma = abs(max(gamma))
score += 1.0 - ((max_gamma - min_gamma) / max(min_gamma, max_gamma))
+
return score / 2.0
def _hilbert_transform_process(
args: tuple,
-) -> Optional[Tuple[float, NDArray[float64], dict, dict]]:
+) -> Union[Optional[Tuple[float, NDArray[float64], dict, dict]], Exception]:
theta_0: NDArray[float64]
w: NDArray[float64]
Z: ComplexImpedances
@@ -844,6 +926,7 @@ def _hilbert_transform_process(
num_freqs,
num_taus,
)
+
theta_0 = data_real["theta"]
data_imag: dict = _single_hilbert_transform_estimate(
theta_0,
@@ -854,10 +937,12 @@ def _hilbert_transform_process(
num_freqs,
num_taus,
)
+
mu_R_inf: float = data_real["mu_gamma"][0]
mu_Z_H_re: NDArray[float64] = data_imag["mu_Z_H"]
mu_L_0: float = data_imag["mu_gamma"][0]
mu_Z_H_im: NDArray[float64] = data_real["mu_Z_H"]
+
Z_fit: ComplexImpedances = array(
list(
map(
@@ -871,6 +956,7 @@ def _hilbert_transform_process(
dtype=ComplexImpedance,
)
pseudo_chisqr: float = _calculate_pseudo_chisqr(Z_exp=Z, Z_fit=Z_fit)
+
result: Tuple[float, NDArray[float64], dict, dict] = (
pseudo_chisqr,
theta_0,
@@ -878,7 +964,7 @@ def _hilbert_transform_process(
data_imag,
)
if (
- _calculate_symmetry_score(
+ maximum_symmetry > 0.0 and _calculate_symmetry_score(
result,
tau_fine,
tau,
@@ -889,14 +975,15 @@ def _hilbert_transform_process(
):
# The result is most likely poor (lots of strong oscillation).
return None
+
return (
pseudo_chisqr,
theta_0,
data_real,
data_imag,
)
- except Exception:
- return None
+ except Exception as err:
+ return err
def _perform_attempts(
@@ -919,6 +1006,7 @@ def _perform_attempts(
) -> Tuple[float, NDArray[float64], dict, dict]:
L: NDArray[float64] = _compute_L(tau, derivative_order)
theta_0_generator: Callable = lambda: 10 ** (6 * rand(3, 1) - 3)
+
args = (
(
theta_0_generator(),
@@ -939,28 +1027,40 @@ def _perform_attempts(
)
for _ in range(0, num_attempts)
)
+
results: List[Tuple[float, NDArray[float64], dict, dict]] = []
+ errors: List[Exception] = []
+
prog: Progress
with Progress("Calculating Hilbert transforms", total=num_attempts + 1) as prog:
if num_procs > 1:
with Pool(num_procs) as pool:
- for i, res in enumerate(
- pool.imap_unordered(
- _hilbert_transform_process,
- args,
- )
+ for res in pool.imap_unordered(
+ _hilbert_transform_process,
+ args,
):
prog.increment()
- if res is not None:
+ if isinstance(res, Exception):
+ errors.append(res)
+ elif res is not None:
results.append(res)
+
else:
- for i, res in enumerate(map(_hilbert_transform_process, args)):
+ for res in map(_hilbert_transform_process, args):
prog.increment()
- if res is not None:
+ if isinstance(res, Exception):
+ errors.append(res)
+ elif res is not None:
results.append(res)
+
if len(results) == 0:
- raise DRTError("Failed to perform calculations! Try tweaking the settings.")
+ if len(errors) > 0:
+ raise errors.pop(0)
+ else:
+ raise DRTError("Failed to perform calculations! Try tweaking the settings.")
+
results.sort(key=lambda _: _[0])
+
return results[0]
@@ -972,12 +1072,15 @@ def _calculate_model_impedance(
# Real part
mu_R_inf: float = data_real["mu_gamma"][0]
mu_Z_H_im: NDArray[float64] = data_real["mu_Z_H"]
+
# Imaginary part
mu_L_0: float = data_imag["mu_gamma"][0]
mu_Z_H_re: NDArray[float64] = data_imag["mu_Z_H"]
+
# Means and bounds
mu_Z_H_re_agm: NDArray[float64] = mu_R_inf + mu_Z_H_re
mu_Z_H_im_agm: NDArray[float64] = w * mu_L_0 + mu_Z_H_im
+
return array(
list(
map(
@@ -992,6 +1095,7 @@ def _calculate_model_impedance(
)
+# TODO: Add support for admittance?
def calculate_drt_bht(
data: DataSet,
rbf_type: str = "gaussian",
@@ -1001,7 +1105,7 @@ def calculate_drt_bht(
num_samples: int = 2000,
num_attempts: int = 10,
maximum_symmetry: float = 0.5,
- num_procs: int = 0,
+ num_procs: int = -1,
**kwargs,
) -> BHTResult:
"""
@@ -1071,44 +1175,61 @@ def calculate_drt_bht(
-------
BHTResult
"""
- assert hasattr(data, "get_frequencies") and callable(data.get_frequencies)
- assert hasattr(data, "get_impedances") and callable(data.get_impedances)
- assert type(rbf_type) is str, rbf_type
- if rbf_type not in _RBF_TYPES:
- raise DRTError("Valid rbf_type values: '" + "', '".join(_RBF_TYPES))
- assert issubdtype(type(derivative_order), integer), derivative_order
- if not (1 <= derivative_order <= 2):
- raise DRTError("Valid derivative_order values: 1, 2")
- assert type(rbf_shape) is str, rbf_shape
- if rbf_shape not in _RBF_SHAPES:
- raise DRTError("Valid rbf_shape values: '" + "', '".join(_RBF_SHAPES))
- assert issubdtype(type(shape_coeff), floating), shape_coeff
- if shape_coeff <= 0.0:
- raise DRTError("The shape coefficient must be greater than 0.0!")
- assert issubdtype(type(num_samples), integer), num_samples
- if num_samples < 1:
- raise DRTError("The number of samples must be greater than ")
- assert issubdtype(type(num_attempts), integer), num_attempts
- if num_attempts < 1:
- raise DRTError("The number of attempts must be greater than 0!")
- assert issubdtype(type(maximum_symmetry), floating), maximum_symmetry
- if not (0.0 <= maximum_symmetry <= 1.0):
- raise DRTError("The maximum symmetry must be between 0.0 and 1.0 (inclusive)!")
- assert issubdtype(type(num_procs), integer), num_procs
- if num_procs < 1:
- num_procs = _get_default_num_procs() - abs(num_procs)
- if num_procs < 1:
- num_procs = 1
+ if not isinstance(rbf_type, str):
+ raise TypeError(f"Expected a string instead of {rbf_type=}")
+ elif rbf_type not in _RBF_TYPES:
+ raise ValueError("Valid rbf_type values: '" + "', '".join(_RBF_TYPES) + "'")
+
+ if not _is_integer(derivative_order):
+ raise TypeError(f"Expected an integer instead of {derivative_order=}")
+ elif not (1 <= derivative_order <= 2):
+ raise ValueError("Valid derivative_order values: 1, 2")
+
+ if not isinstance(rbf_shape, str):
+ raise TypeError(f"Expected a string instead of {rbf_shape=}")
+ elif rbf_shape not in _RBF_SHAPES:
+ raise ValueError("Valid rbf_shape values: '" + "', '".join(_RBF_SHAPES) + "'")
+
+ if not _is_floating(shape_coeff):
+ raise TypeError(f"Expected a float instead of {shape_coeff=}")
+ elif shape_coeff <= 0.0:
+ raise ValueError("The shape coefficient must be greater than 0.0")
+
+ if not _is_integer(num_samples):
+ raise TypeError(f"Expected an integer instead of {num_samples=}")
+ elif num_samples < 1:
+ raise ValueError("The number of samples must be greater than 0")
+
+ if not _is_integer(num_attempts):
+ raise TypeError(f"Expected an integer instead of {num_attempts=}")
+ elif num_attempts < 1:
+ raise ValueError("The number of attempts must be greater than 0")
+
+ if not _is_floating(maximum_symmetry):
+ raise TypeError(f"Expected a float instead of {maximum_symmetry=}")
+ elif not (0.0 <= maximum_symmetry <= 1.0):
+ raise ValueError("The maximum symmetry must be in the range [0.0, 1.0]")
+
+ if not _is_integer(num_procs):
+ raise TypeError(f"Expected an integer instead of {num_procs=}")
+ elif num_procs < 1:
+ num_procs = max((_get_default_num_procs() - abs(num_procs), 1))
+
prog: Progress
with Progress("Preparing matrices", total=5) as prog:
f: Frequencies = data.get_frequencies()
- Z_exp: ComplexImpedances = data.get_impedances()
+ if len(f) < 1:
+ raise ValueError(
+ f"There are no unmasked data points in the '{data.get_label()}' data set parsed from '{data.get_path()}'"
+ )
+
tau: NDArray[float64] = 1 / f
tau_fine: NDArray[float64] = logspace(
log(tau.min()) - 0.5,
log(tau.max()) + 0.5,
10 * f.shape[0],
)
+
w: NDArray[float64] = 2 * pi * f
epsilon: float = _compute_epsilon(
f,
@@ -1116,14 +1237,20 @@ def calculate_drt_bht(
shape_coeff,
rbf_type,
)
+
A_re: NDArray[float64] = _compute_A_re(w, tau)
prog.increment()
+
A_im: NDArray[float64] = _compute_A_im(w, tau)
prog.increment()
+
A_H_re: NDArray[float64] = _compute_A_H_re(w, tau)
prog.increment()
+
A_H_im: NDArray[float64] = _compute_A_H_im(w, tau)
prog.increment()
+
+ Z_exp: ComplexImpedances = data.get_impedances()
theta_0: NDArray[float64]
data_real: dict
data_imag: dict
@@ -1145,6 +1272,7 @@ def calculate_drt_bht(
num_attempts,
num_procs,
)
+
# Scores seem to be fine based on comparison with the ZARC example used in the article
scores: dict = _calculate_scores(
theta_0,
@@ -1156,6 +1284,7 @@ def calculate_drt_bht(
)
prog.set_message("Calculating model impedance")
Z_fit: ComplexImpedances = _calculate_model_impedance(w, data_real, data_imag)
+
time_constants: TimeConstants
time_constants, real_gammas = _x_to_gamma(
data_real["mu_gamma"][1:],
@@ -1172,6 +1301,7 @@ def calculate_drt_bht(
epsilon,
rbf_type,
)
+
return BHTResult(
time_constants=time_constants,
real_gammas=real_gammas,
diff --git a/src/pyimpspec/analysis/drt/mrq_fit.py b/src/pyimpspec/analysis/drt/mrq_fit.py
index 2cc6c49..c131489 100644
--- a/src/pyimpspec/analysis/drt/mrq_fit.py
+++ b/src/pyimpspec/analysis/drt/mrq_fit.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -22,23 +22,12 @@
# 10.1016/j.ssi.2016.10.009
from dataclasses import dataclass
-from typing import (
- Dict,
- List,
- Optional,
- Tuple,
- Type,
- Union,
-)
from numpy import (
cos,
cosh,
exp,
float64,
- floating,
- integer,
isclose,
- issubdtype,
log as ln,
log10 as log,
pi,
@@ -84,6 +73,15 @@
Indices,
TimeConstants,
)
+from pyimpspec.typing.helpers import (
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ Type,
+ Union,
+ _is_floating,
+)
@dataclass(frozen=True)
@@ -121,10 +119,12 @@ class MRQFitResult(DRTResult):
@staticmethod
def _generate_label(circuit: Circuit) -> str:
label_fragments: List[str] = []
- series: Union[Element, Connection] = circuit.get_elements(flattened=False)[0]
- assert isinstance(series, Series)
+ series: Series = circuit.get_connections()[0]
+ if not isinstance(series, Series):
+ raise TypeError(f"Expected a Series instead of {series=}")
+
elem_or_con: Union[Element, Connection]
- for elem_or_con in series.get_elements(flattened=False):
+ for elem_or_con in series:
if isinstance(elem_or_con, Parallel):
element: Union[Element, Connection]
for element in elem_or_con.get_elements():
@@ -133,11 +133,19 @@ def _generate_label(circuit: Circuit) -> str:
elif isinstance(element, ConstantPhaseElement):
label_fragments.append("(RQ)")
elif isinstance(elem_or_con, Resistor):
- assert "R" not in label_fragments
+ if "R" in label_fragments:
+ raise ValueError(f"'R' is already in {label_fragments=}")
+
label_fragments.append("R")
- assert label_fragments.count("R") <= 1
+
+ if label_fragments.count("R") > 1:
+ raise ValueError(
+ f"There should only be a maximum of 1 'R' in {label_fragments=}"
+ )
+
num_RQ: int = label_fragments.count("(RQ)")
num_RC: int = label_fragments.count("(RC)")
+
label: str = "R" * label_fragments.count("R")
if num_RQ == 1:
label += "(RQ)"
@@ -147,6 +155,7 @@ def _generate_label(circuit: Circuit) -> str:
label += ("-" if num_RQ > 1 else "") + "(RC)"
elif num_RC > 1:
label += f"-{num_RC}(RC)"
+
return label
def get_label(self) -> str:
@@ -180,6 +189,7 @@ def get_peaks(self, threshold: float = 0.0) -> Tuple[TimeConstants, Gammas]:
threshold,
self.gammas, # type: ignore
)
+
return (
self.time_constants[indices], # type: ignore
self.gammas[indices], # type: ignore
@@ -210,12 +220,22 @@ def to_peaks_dataframe(
"tau (s)",
"gamma (ohm)",
]
- assert isinstance(columns, list), columns
- assert len(columns) == 2
+ elif not isinstance(columns, list):
+ raise TypeError(f"Expected a list of strings instead of {columns=}")
+ elif len(columns) != 2:
+ raise ValueError(f"Expected a list with 2 items instead of {len(columns)=}")
+ elif not all(map(lambda s: isinstance(s, str), columns)):
+ raise TypeError(f"Expected a list of strings instead of {columns=}")
+ elif len(set(columns)) != 2:
+ raise ValueError(
+ f"Expected a list of 2 unique strings instead of {columns=}"
+ )
+
indices: Indices = self._get_peak_indices(
threshold,
self.gammas, # type: ignore
)
+
return DataFrame.from_dict(
{
columns[0]: self.time_constants[indices], # type: ignore
@@ -231,6 +251,7 @@ def to_statistics_dataframe(
statistics: Dict[str, Union[int, float, str]] = {
"Log pseudo chi-squared": log(self.pseudo_chisqr),
}
+
return DataFrame.from_dict(
{
"Label": list(statistics.keys()),
@@ -256,20 +277,25 @@ def _validate_connections(connections: List[Connection]):
# Top-level series connection
if not isinstance(connections[0], Series):
raise DRTError("Invalid circuit: expected a top-level series connection!")
+
series: Connection = connections.pop(0)
elements_or_connections: List[Union[Type[Element], Type[Connection]]]
- elements_or_connections = list(map(type, series.get_elements(flattened=False)))
+ elements_or_connections = list(map(type, series))
+
if elements_or_connections.count(Resistor) > 1:
raise DRTError(
"Invalid circuit: only one optional series resistance is allowed!"
)
+
if elements_or_connections.count(Parallel) < 1:
raise DRTError("Invalid circuit: expected at least one parallel connection!")
+
for elem_or_con in elements_or_connections:
if elem_or_con is not Resistor and elem_or_con is not Parallel:
raise DRTError(
"Invalid circuit: unsupported element in the top-level series connection!"
)
+
# Parallel connections (i.e., (RC) and (RQ))
con: Connection
for con in connections:
@@ -277,19 +303,24 @@ def _validate_connections(connections: List[Connection]):
raise DRTError(
"Invalid circuit: no series connections other than the top-level series connection are allowed!"
)
+
if len(con.get_connections()) > 1:
raise DRTError(
"Invalid circuit: nested connections are not allowed within the parallel connections!"
)
+
elements: List[Type[Element]] = list(map(type, con.get_elements()))
+
if len(elements) != 2:
raise DRTError(
"Invalid circuit: the parallel connections may only contain two elements!"
)
+
if Resistor not in elements:
raise DRTError(
"Invalid circuit: the parallel connections must contain a resistor!"
)
+
if not (Capacitor in elements or ConstantPhaseElement in elements):
raise DRTError(
"Invalid circuit: the parallel connections must contain either a capacitor or a constant phase element!"
@@ -304,26 +335,34 @@ def _validate_circuit(circuit: Circuit):
def _adjust_initial_values(circuit: Circuit, data: DataSet) -> Circuit:
f: Frequencies = data.get_frequencies()
Z_exp: ComplexImpedances = data.get_impedances()
+
connections: List[Connection] = circuit.get_connections()
series: Connection = connections.pop(0)
- assert isinstance(series, Series), series
+ if not isinstance(series, Series):
+ raise TypeError(f"Expected a Series instead of {series=}")
+
element: Union[Element, Connection]
- for element in series.get_elements(flattened=False):
+ for element in series:
if isinstance(element, Resistor):
if not element.is_fixed("R"):
element.set_values(R=Z_exp[0].real)
+
num_parallels: int = len(connections)
R_frac: float = (max(Z_exp.real) - min(Z_exp.real)) / num_parallels
i: int = num_parallels + 1
+
parallel: Connection
for parallel in connections:
- assert isinstance(parallel, Parallel)
+ if not isinstance(parallel, Parallel):
+ raise TypeError(f"Expected a Parallel instead of {parallel=}")
+
for element in parallel.get_elements():
if isinstance(element, Resistor):
if not element.is_fixed("R"):
element.set_values(R=R_frac)
continue
i -= 1
+
if isinstance(element, Capacitor):
if not element.is_fixed("C"):
element.set_values(
@@ -341,6 +380,7 @@ def _adjust_initial_values(circuit: Circuit, data: DataSet) -> Circuit:
)
/ R_frac
)
+
return circuit
@@ -353,17 +393,27 @@ def _calculate_tau_gamma(
tau: NDArray[float64] = 1 / (_interpolate(f, num_per_decade=num_per_decade))
gamma: NDArray[float64] = zeros(tau.shape, dtype=float64)
connections: List[Connection] = circuit.get_connections()
- assert isinstance(connections.pop(0), Series)
+
+ series: Connection = connections.pop(0)
+ if not isinstance(series, Series):
+ raise TypeError(f"Expected a Series instead of {series=}")
+
parallel: Connection
for parallel in connections:
- assert isinstance(parallel, Parallel)
+ if not isinstance(parallel, Parallel):
+ raise TypeError(f"Expected a Parallel instead of {parallel=}")
+
parameters: Dict[str, float] = {}
+
element: Element
- for element in parallel.get_elements(flattened=True):
+ for element in parallel.get_elements(recursive=True):
parameters.update(element.get_values())
+
R: float = parameters["R"]
Y: Optional[float] = parameters.get("Y", parameters.get("C"))
- assert Y is not None
+ if not _is_floating(Y):
+ raise TypeError(f"Expected a float instead of {Y=}")
+
n: float = parameters.get("n", 1.0)
tau_0: float = (R * Y) ** (1.0 / n)
if isclose(n, 1.0, atol=1e-2):
@@ -374,6 +424,7 @@ def _calculate_tau_gamma(
* (sin((1 - n) * pi))
/ (cosh(n * ln(tau / tau_0)) - cos((1 - n) * pi))
)
+
return (
tau,
gamma,
@@ -382,10 +433,12 @@ def _calculate_tau_gamma(
def _generate_label(circuit: Circuit) -> str:
label_fragments: List[str] = []
- series: Union[Element, Connection] = circuit.get_elements(flattened=False)[0]
- assert isinstance(series, Series)
+ series: Series = circuit.get_connections()[0]
+ if not isinstance(series, Series):
+ raise TypeError(f"Expected a Series instead of {series=}")
+
elem_or_con: Union[Element, Connection]
- for elem_or_con in series.get_elements(flattened=False):
+ for elem_or_con in series:
if isinstance(elem_or_con, Parallel):
element: Union[Element, Connection]
for element in elem_or_con.get_elements():
@@ -393,21 +446,32 @@ def _generate_label(circuit: Circuit) -> str:
label_fragments.append("(RC)")
elif isinstance(element, ConstantPhaseElement):
label_fragments.append("(RQ)")
+
elif isinstance(elem_or_con, Resistor):
- assert "R" not in label_fragments
+ if "R" in label_fragments:
+ raise ValueError(f"'R' is already in {label_fragments=}")
+
label_fragments.append("R")
- assert label_fragments.count("R") <= 1
+
+ if label_fragments.count("R") > 1:
+ raise ValueError(
+ f"There should only be a maximum of 1 'R' in {label_fragments=}"
+ )
+
num_RQ: int = label_fragments.count("(RQ)")
num_RC: int = label_fragments.count("(RC)")
+
label: str = "R" * label_fragments.count("R")
if num_RQ == 1:
label += "(RQ)"
elif num_RQ > 1:
label += f"-{num_RQ}(RQ)"
+
if num_RC == 1:
label += ("-" if num_RQ > 1 else "") + "(RC)"
elif num_RC > 1:
label += f"-{num_RC}(RC)"
+
return label
@@ -418,7 +482,7 @@ def calculate_drt_mrq_fit(
gaussian_width: float = 0.15,
num_per_decade: int = 100,
max_nfev: int = -1,
- num_procs: int = 0,
+ num_procs: int = -1,
**kwargs,
) -> MRQFitResult:
"""
@@ -460,22 +524,28 @@ def calculate_drt_mrq_fit(
-------
MRQFitResult
"""
- assert hasattr(data, "get_frequencies") and callable(data.get_frequencies)
- assert hasattr(data, "get_impedances") and callable(data.get_impedances)
- assert isinstance(data, DataSet), type(data)
- assert isinstance(circuit, Circuit), type(circuit)
- if fit is not None:
- assert isinstance(fit, FitResult) or hasattr(fit, "circuit"), fit
- assert issubdtype(type(gaussian_width), floating), gaussian_width
- if gaussian_width <= 0.0:
- raise DRTError("The Gaussian width must be greater than 0.0!")
- assert issubdtype(type(num_procs), integer), (type(num_procs), num_procs)
+ if not isinstance(circuit, Circuit):
+ raise TypeError(f"Expected a Circuit instead of {circuit=}")
+
+ if fit is not None and not hasattr(fit, "circuit"):
+ raise TypeError(
+ f"Expected None or an object with a 'circuit' property instead of {fit=}"
+ )
+
+ if not _is_floating(gaussian_width):
+ raise TypeError(f"Expected a float instead of {gaussian_width=}")
+ elif gaussian_width <= 0.0:
+ raise ValueError("The Gaussian width must be greater than 0.0!")
+
prog: Progress
with Progress("Validating circuit", total=3) as prog:
_validate_circuit(circuit)
prog.increment()
if fit is not None:
- assert fit.circuit is circuit
+ if fit.circuit is not circuit:
+ raise ValueError(
+ f"Expected {circuit=} and {fit.circuit=} to be the same object"
+ )
else:
fit = fit_circuit(
_adjust_initial_values(
@@ -486,19 +556,33 @@ def calculate_drt_mrq_fit(
max_nfev=max_nfev,
num_procs=num_procs,
)
+ fit = fit_circuit(
+ fit.circuit,
+ data,
+ max_nfev=max_nfev,
+ num_procs=num_procs,
+ )
circuit = fit.circuit
+
prog.increment()
prog.set_message("Calculating DRT")
+ f: Frequencies = data.get_frequencies()
+ if len(f) < 1:
+ raise ValueError(
+ f"There are no unmasked data points in the '{data.get_label()}' data set parsed from '{data.get_path()}'"
+ )
+
tau: TimeConstants
gamma: Gammas
tau, gamma = _calculate_tau_gamma(
- circuit,
- data.get_frequencies(),
- gaussian_width,
- num_per_decade,
+ circuit=circuit,
+ f=f,
+ W=gaussian_width,
+ num_per_decade=num_per_decade,
)
- f: Frequencies = data.get_frequencies()
+
Z_fit: ComplexImpedances = simulate_spectrum(circuit, f).get_impedances()
+
return MRQFitResult(
time_constants=tau,
gammas=gamma,
diff --git a/src/pyimpspec/analysis/drt/result.py b/src/pyimpspec/analysis/drt/result.py
index 0b5f6ec..b3898d0 100644
--- a/src/pyimpspec/analysis/drt/result.py
+++ b/src/pyimpspec/analysis/drt/result.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -22,17 +22,10 @@
abstractmethod,
)
from dataclasses import dataclass
-from typing import (
- List,
- Optional,
- Tuple,
-)
from numpy import (
angle,
array,
- floating,
int64,
- issubdtype,
)
from pyimpspec.typing import (
ComplexImpedances,
@@ -45,6 +38,12 @@
Residuals,
TimeConstants,
)
+from pyimpspec.typing.helpers import (
+ List,
+ Optional,
+ Tuple,
+ _is_floating,
+)
@dataclass(frozen=True)
@@ -139,15 +138,19 @@ def to_peaks_dataframe(
def _get_peak_indices(self, threshold: float, gamma: Gammas) -> Indices:
from scipy.signal import find_peaks
- assert issubdtype(type(threshold), floating), threshold
- assert 0.0 <= threshold <= 1.0, threshold
- indices: Indices
- indices, _ = find_peaks(gamma)
+ if not _is_floating(threshold):
+ raise TypeError(f"Expected a float instead of {threshold=}")
+ elif not (0.0 <= threshold <= 1.0):
+ raise ValueError(f"Expected a value in the range [0.0, 1.0] instead of {threshold=}")
+
+ indices: Indices = find_peaks(gamma)[0]
if not indices.any():
return array([], dtype=int64)
+
max_g: float = max(gamma)
if max_g == 0.0:
return array([], dtype=int64)
+
return array(
list(
filter(
diff --git a/src/pyimpspec/analysis/drt/tr_nnls.py b/src/pyimpspec/analysis/drt/tr_nnls.py
index 10cd7b5..d9a803a 100644
--- a/src/pyimpspec/analysis/drt/tr_nnls.py
+++ b/src/pyimpspec/analysis/drt/tr_nnls.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -23,19 +23,11 @@
# DRT-python-code commit: 9663ed8b331f521a9fcdb0b58fb2b34693df938c
from dataclasses import dataclass
-from typing import (
- Dict,
- List,
- Optional,
- Tuple,
- Union,
-)
from numpy import (
array,
- floating,
float64,
+ fromiter,
identity,
- issubdtype,
int64,
log as ln,
log10 as log,
@@ -49,7 +41,6 @@
from numpy.linalg import norm
from numpy.typing import NDArray
from pyimpspec.data import DataSet
-from pyimpspec.exceptions import DRTError
from pyimpspec.analysis.utility import (
_calculate_residuals,
_calculate_pseudo_chisqr,
@@ -64,6 +55,14 @@
Indices,
TimeConstants,
)
+from pyimpspec.typing.helpers import (
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ Union,
+ _is_floating,
+)
from .utility import _l_curve_corner_search
@@ -124,12 +123,22 @@ def to_peaks_dataframe(
"tau (s)",
"gamma (ohm)",
]
- assert isinstance(columns, list), columns
- assert len(columns) == 2
+ elif not isinstance(columns, list):
+ raise TypeError(f"Expected a list of strings instead of {columns=}")
+ elif len(columns) != 2:
+ raise ValueError(f"Expected a list with 2 items instead of {len(columns)=}")
+ elif not all(map(lambda s: isinstance(s, str), columns)):
+ raise TypeError(f"Expected a list of strings instead of {columns=}")
+ elif len(set(columns)) != 2:
+ raise ValueError(
+ f"Expected a list of 2 unique strings instead of {columns=}"
+ )
+
indices: Indices = self._get_peak_indices(
threshold,
self.gammas, # type: ignore
)
+
return DataFrame.from_dict(
{
columns[0]: self.time_constants[indices], # type: ignore
@@ -146,6 +155,7 @@ def to_statistics_dataframe(
"Log pseudo chi-squared": log(self.pseudo_chisqr),
"Lambda": self.lambda_value,
}
+
return DataFrame.from_dict(
{
"Label": list(statistics.keys()),
@@ -171,6 +181,7 @@ def get_peaks(self, threshold: float = 0.0) -> Tuple[TimeConstants, Gammas]:
threshold,
self.gammas, # type: ignore
)
+
return (
self.time_constants[indices], # type: ignore
self.gammas[indices], # type: ignore
@@ -196,11 +207,14 @@ def get_drt_data(self) -> Tuple[TimeConstants, Gammas]:
def _calculate_delta_ln_tau(tau: TimeConstants) -> NDArray[float64]:
ln_tau: NDArray[float64] = ln(tau)
delta_ln_tau: NDArray[float64] = zeros(tau.size, dtype=float64)
+
i: int
for i in range(1, tau.size - 1):
delta_ln_tau[i] = 0.5 * (ln_tau[i + 1] - ln_tau[i - 1])
+
delta_ln_tau[0] = 0.5 * (ln_tau[1] - ln_tau[0])
delta_ln_tau[-1] = 0.5 * (ln_tau[-1] - ln_tau[-2])
+
return delta_ln_tau
@@ -209,8 +223,10 @@ def _normalize_impedance(
) -> Tuple[ComplexImpedances, float, float]:
R_inf: float = Z[0].real # High-frequency resistance
Z_norm: ComplexImpedances = Z - R_inf
+
R_pol: float = Z_norm[-1].real - Z_norm[0].real
Z_norm /= R_pol
+
return (
Z_norm,
R_inf,
@@ -231,10 +247,12 @@ def _generate_A_matrix(
),
dtype=float64,
)
+
product: NDArray[float64]
for i in range(0, omega.size):
product = omega[i] * tau
A[i, :] = (product if is_imaginary else 1) * delta_ln_tau / (1 + product**2)
+
return A
@@ -284,12 +302,14 @@ def _test_lambda_values(
total=len(lambda_values) + 1,
) as prog:
solution_norms: NDArray[float64] = zeros(lambda_values.size, dtype=float64)
+
i: int
for i, lambda_value in enumerate(lambda_values):
A_tikh: NDArray[float64] = _generate_tikhonov_matrix(A, I, lambda_value)
g_tau: NDArray[float64] = _solve(A_tikh, b)
solution_norms[i] = sqrt(array_sum(g_tau**2))
prog.increment()
+
return (
lambda_values,
solution_norms,
@@ -302,6 +322,7 @@ def _reduce_points_by_radius(
r: float,
) -> Tuple[NDArray[float64], NDArray[float64]]:
raw_indices: List[int] = [0]
+
i: int = 0
while i < x.size - 1:
i += 1
@@ -309,7 +330,9 @@ def _reduce_points_by_radius(
if ((x[i] - x[j]) ** 2 + (y[i] - y[j]) ** 2) ** (1 / 2) < r:
continue
raw_indices.append(i)
+
indices: Indices = array(raw_indices, dtype=int64)
+
return (
x[indices],
y[indices],
@@ -325,13 +348,16 @@ def _suggest_lambda(
solution_norms,
r=2e-2,
)
+
n: int = 5
m1: float
c1: float
(m1, c1) = polyfit(lambda_values[:n], solution_norms[:n], deg=1)
+
m2: float
c2: float
(m2, c2) = polyfit(lambda_values[-n:], solution_norms[-n:], deg=1)
+
return (c2 - c1) / (m1 - m2)
@@ -347,24 +373,30 @@ def _generate_model_impedance(
is_imaginary: bool,
) -> ComplexImpedances:
Z_re_im: NDArray[float64] = zeros(omega.size, dtype=float64)
+
+ i: int
for i in range(0, omega.size):
product = omega[i] * tau
Z_re_im[i] = array_sum(
delta_ln_tau
* ((product if is_imaginary else 1) * g_tau / (1 + product**2))
)
+
Z_re_im = R_pol * Z_re_im
+
if is_imaginary:
- return array(
- list(map(lambda _: complex(*_), zip(Z.real, -Z_re_im))),
- dtype=ComplexImpedance,
- )
- else:
- return array(
- list(map(lambda _: complex(*_), zip(Z_re_im + R_inf, Z.imag))),
+ return fromiter(
+ map(lambda _: complex(*_), zip(Z.real, -Z_re_im)),
dtype=ComplexImpedance,
+ count=len(Z),
)
+ return fromiter(
+ map(lambda _: complex(*_), zip(Z_re_im + R_inf, Z.imag)),
+ dtype=ComplexImpedance,
+ count=len(Z),
+ )
+
def _l_curve_P(
lambda_value: float,
@@ -374,6 +406,7 @@ def _l_curve_P(
) -> Tuple[float64, float64]:
A_tikh: NDArray[float64] = _generate_tikhonov_matrix(A, I, lambda_value)
g_tau: NDArray[float64] = _solve(A_tikh, b)
+
return (
log(norm(A_tikh @ g_tau - b) ** 2),
log(norm(g_tau) ** 2),
@@ -414,27 +447,29 @@ def calculate_drt_tr_nnls(
-------
TRNNLSResult
"""
- assert hasattr(data, "get_frequencies") and callable(data.get_frequencies)
- assert hasattr(data, "get_impedances") and callable(data.get_impedances)
- assert (
- hasattr(data, "get_frequencies")
- and callable(data.get_frequencies)
- and hasattr(data, "get_impedances")
- and callable(data.get_impedances)
- ), data
- assert type(mode) is str, mode
- if mode not in _MODES:
- raise DRTError("Valid mode values: '" + "', '".join(_MODES))
- assert issubdtype(type(lambda_value), floating), lambda_value
+ if not isinstance(mode, str):
+ raise TypeError(f"Expected a string instead of {mode=}")
+ elif mode not in _MODES:
+ raise ValueError("Valid mode values: '" + "', '".join(_MODES) + "'")
+
+ if not _is_floating(lambda_value):
+ raise TypeError(f"Expected a float instead of {lambda_value=}")
+
prog: Progress
with Progress("Preparing matrices", total=6) as prog:
is_imaginary: bool = mode == "imaginary"
f: Frequencies = data.get_frequencies()
+ if len(f) < 1:
+ raise ValueError(
+ f"There are no unmasked data points in the '{data.get_label()}' data set parsed from '{data.get_path()}'"
+ )
+
Z_exp: ComplexImpedances = data.get_impedances()
omega: NDArray[float64] = 2 * pi * f
tau: TimeConstants = 1 / f
delta_ln_tau: NDArray[float64] = _calculate_delta_ln_tau(tau)
prog.increment()
+
Z_norm: ComplexImpedances
R_inf: float
R_pol: float
@@ -443,8 +478,10 @@ def calculate_drt_tr_nnls(
I: NDArray[float64] = identity(omega.size, dtype=int64)
A: NDArray[float64] = _generate_A_matrix(omega, tau, delta_ln_tau, is_imaginary)
prog.increment()
+
b: NDArray[float64] = _generate_b_vector(A, Z_norm, is_imaginary)
prog.increment()
+
A_tikh: NDArray[float64]
g_tau: NDArray[float64]
# Try to determine a suitable regularization parameter if one hasn't
@@ -455,6 +492,7 @@ def calculate_drt_tr_nnls(
minimum=1e-10,
maximum=1,
)
+
elif lambda_value <= 0.0:
lambda_value = _suggest_lambda(
*_test_lambda_values(
@@ -468,11 +506,14 @@ def calculate_drt_tr_nnls(
),
),
)
+
prog.set_message("Calculating DRT")
A_tikh = _generate_tikhonov_matrix(A, I, lambda_value)
prog.increment()
+
g_tau = _solve(A_tikh, b)
prog.increment()
+
# R_pol_synthetic: float = array_sum(g_tau * delta_ln_tau) # Should be (close to) 1.0
Z_fit: ComplexImpedances = _generate_model_impedance(
omega,
@@ -486,6 +527,7 @@ def calculate_drt_tr_nnls(
is_imaginary,
)
gamma: Gammas = g_tau * R_pol
+
return TRNNLSResult(
time_constants=tau,
gammas=gamma,
diff --git a/src/pyimpspec/analysis/drt/tr_rbf.py b/src/pyimpspec/analysis/drt/tr_rbf.py
index 0333a53..80faabd 100644
--- a/src/pyimpspec/analysis/drt/tr_rbf.py
+++ b/src/pyimpspec/analysis/drt/tr_rbf.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -18,30 +18,22 @@
# the LICENSES folder.
# This module uses Tikhonov regularization and either radial basis function or piecewise linear discretization
-# 10.1016/j.electacta.2015.09.097
+# - 10.1016/j.electacta.2015.09.097
# - 10.1016/j.electacta.2015.03.123
# - 10.1016/j.electacta.2017.07.050
# Based on code from https://github.com/ciuccislab/pyDRTtools.
-# pyDRTtools commit: 65ea54d9332a0c6594de852f0242a88e20ec4427
+# pyDRTtools commit: 3694b9b4cef9b29d623bef7300280810ec351d46
from dataclasses import dataclass
+from time import time
from multiprocessing import Pool
-from multiprocessing.context import TimeoutError as MPTimeoutError
-from time import sleep
-from typing import (
- Callable,
- Dict,
- List,
- Optional,
- Tuple,
- Union,
-)
from numpy import (
abs as array_abs,
arccos,
arctan2,
argmin,
array,
+ ceil,
concatenate,
cos,
cumsum,
@@ -51,11 +43,8 @@
eye,
finfo,
float64,
- floating,
inf,
int64,
- integer,
- issubdtype,
log as ln,
log10 as log,
logspace,
@@ -70,12 +59,15 @@
square,
std,
sum as array_sum,
+ trace,
where,
zeros,
)
from numpy.linalg import (
cholesky,
norm,
+ inv,
+ solve,
)
from numpy.matlib import repmat
from numpy.random import randn
@@ -99,7 +91,22 @@
TimeConstant,
TimeConstants,
)
-from .utility import _l_curve_corner_search
+from pyimpspec.typing.helpers import (
+ Callable,
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ Union,
+ _is_boolean,
+ _is_integer,
+ _is_floating,
+ _is_floating_array,
+)
+from .utility import (
+ _is_positive_definite,
+ _nearest_positive_definite,
+)
_SOLVER_IMPORTED: bool = False
@@ -168,6 +175,7 @@ def get_drt_credible_intervals_data(
array([], dtype=Gamma),
array([], dtype=Gamma),
)
+
return (
self.time_constants,
self.mean_gammas,
@@ -193,6 +201,7 @@ def get_drt_data(self) -> Tuple[TimeConstants, Gammas]:
-------
Tuple[|TimeConstants|, |Gammas|]
"""
+
return (
self.time_constants, # type: ignore
self.gammas, # type: ignore
@@ -216,6 +225,7 @@ def get_peaks(self, threshold: float = 0.0) -> Tuple[TimeConstants, Gammas]:
threshold,
self.gammas, # type: ignore
)
+
return (
self.time_constants[indices], # type: ignore
self.gammas[indices], # type: ignore
@@ -233,12 +243,22 @@ def to_peaks_dataframe(
"tau (s)",
"gamma (ohm)",
]
- assert isinstance(columns, list), columns
- assert len(columns) == 2
+ elif not isinstance(columns, list):
+ raise TypeError(f"Expected a list of strings instead of {columns=}")
+ elif len(columns) != 2:
+ raise ValueError(f"Expected a list with 2 items instead of {len(columns)=}")
+ elif not all(map(lambda s: isinstance(s, str), columns)):
+ raise TypeError(f"Expected a list of strings instead of {columns=}")
+ elif len(set(columns)) != 2:
+ raise ValueError(
+ f"Expected a list of 2 unique strings instead of {columns=}"
+ )
+
indices: Indices = self._get_peak_indices(
threshold,
self.gammas, # type: ignore
)
+
return DataFrame.from_dict(
{
columns[0]: self.time_constants[indices], # type: ignore
@@ -255,6 +275,7 @@ def to_statistics_dataframe(
"Log pseudo chi-squared": log(self.pseudo_chisqr),
"Lambda": self.lambda_value,
}
+
return DataFrame.from_dict(
{
"Label": list(statistics.keys()),
@@ -288,6 +309,7 @@ def _generate_truncated_multivariate_gaussians(
initial_X: NDArray[float64], # d * 1 dimensions
cov: bool = True, # True -> M is the covariance and the mean is mu_r, False -> M is a precision matrix (log-density == -1/2 X'*M*X + r'*X)
L: int = 1, # Number of samples
+ callback: Optional[Callable] = None,
) -> NDArray[float64]:
"""
Algorithm described in http://arxiv.org/abs/1208.4118
@@ -316,28 +338,36 @@ def _generate_truncated_multivariate_gaussians(
Returns an array (d * L dimensions) where each column is a sample
"""
- from scipy.linalg import solve as solve_linalg
+ if g.shape[0] != F.shape[0]:
+ raise ValueError(
+ f"Constraint dimensions do not match: {g.shape[0]=} should be equal to {F.shape[0]=}"
+ )
- assert (
- g.shape[0] == F.shape[0]
- ), f"Constraint dimensions do not match: {g.shape[0]} != {F.shape[0]}"
- R: NDArray[float64] = cholesky(M) # .T?
+ R: NDArray[float64] = cholesky(M)
R = R.T # change the lower matrix to upper matrix
+
mu: NDArray[float64]
- if cov is True: # Using M as a covariance matrix
+ if cov: # Using M as a covariance matrix
mu = mu_r
g = g + F @ mu
F = F @ R.T
- initial_X = solve_linalg(R.T, (initial_X - mu))
+ initial_X = solve(R.T, (initial_X - mu))
+
else: # Using M as a precision matrix
- mu = solve_linalg(R, solve_linalg(R.T, mu_r))
+ mu = solve(R, solve(R.T, mu_r))
g = g + F @ mu
- F = solve_linalg(R, F)
+ F = solve(R, F)
initial_X = R @ (initial_X - mu)
- assert (F @ initial_X + g).any() >= 0, "Inconsistent initial condition!"
+
+ if (F @ initial_X + g).any() < 0:
+ raise ValueError(
+ f"Inconsistent initial condition: {(F @ initial_X + g).any() < 0=})"
+ )
+
# Dimension of mean vector; each sample must be of this dimension
d: int = initial_X.shape[0]
near_zero: float = 1e-12
+
# Squared Euclidean norm of constraint matrix columns
F2: NDArray[float64] = array_sum(square(F), axis=1)
Ft: NDArray[float64] = F.T
@@ -350,24 +380,31 @@ def _generate_truncated_multivariate_gaussians(
dtype=float64,
)
Xs[:, 0] = initial_X
+
# Generate samples
+ start: float = time()
i: int = 2
while i <= L:
+ sample_start = time()
stop: bool = False
j: int = -1
+
# Generate initial velocity from normal distribution
V0: NDArray[float64] = randn(d)
X: NDArray[float64] = last_X
T: float = pi / 2
tt: float = 0.0
+
while True:
a: NDArray[float64] = real(V0)
b: NDArray[float64] = X
fa: NDArray[float64] = F @ a
fb: NDArray[float64] = F @ b
U: NDArray[float64] = sqrt(square(fa) + square(fb))
+
# Has to be arctan2 not arctan
phi: NDArray[float64] = arctan2(-fa, fb)
+
# Find the locations where the constraints were hit
pn: NDArray[float64] = array(array_abs(divide(g, U)) <= 1)
if pn.any():
@@ -376,6 +413,7 @@ def _generate_truncated_multivariate_gaussians(
t1: NDArray[float64] = array_abs(
-1.0 * phn + arccos(divide(-1.0 * g[pn], U[pn]))
)
+
# If there was a previous reflection (j > -1) and there is a potential
# reflection at the sample plane, then make sure that a new reflection
# at j is not found because of numerical error
@@ -389,65 +427,101 @@ def _generate_truncated_multivariate_gaussians(
or array_abs(tt1 - pi) < near_zero
):
t1[indj] = inf
+
mt: float64 = array_min(t1)
m_ind = argmin(t1)
j = inds[m_ind]
else:
mt = T
+
# Update travel time
tt = tt + mt
if tt >= T:
mt = mt - (tt - T)
stop = True
+
# Update position and velocity
X = a * sin(mt) + b * cos(mt)
V = a * cos(mt) - b * sin(mt)
if stop:
break
+
# Update new velocity
qj = F[j, :] @ V / F2[j]
V0 = V - 2 * qj * Ft[:, j]
+
if (F @ X + g).all() > 0:
Xs[:, i - 1] = X
last_X = X
i = i + 1
+ if callback is not None:
+ now: float = time()
+ callback(now - start, now - sample_start, i)
+
if cov:
Xs = R.T @ Xs + repmat(mu.reshape(mu.shape[0], 1), 1, L)
else:
- Xs = solve_linalg(R, Xs) + repmat(mu.reshape(mu.shape[0], 1), 1, L)
- return Xs
+ Xs = solve(R, Xs) + repmat(mu.reshape(mu.shape[0], 1), 1, L)
+ return Xs
-def _calculate_credible_intervals(args) -> Tuple[Gammas, Gammas, Gammas]:
- from scipy.linalg import inv as invert
- num_RL: int
- num_samples: int
- mu: NDArray[float64]
- Sigma_inv: NDArray[float64]
- x: NDArray[float64]
- tau_fine: TimeConstants
- tau: TimeConstants
- epsilon: float
- rbf_type: str
- (
- num_RL,
- num_samples,
- mu,
- Sigma_inv,
- x,
- tau_fine,
- tau,
- epsilon,
- rbf_type,
- ) = args
+def _calculate_credible_intervals(
+ num_RL: int,
+ num_samples: int,
+ mu: NDArray[float64],
+ Sigma_inv: NDArray[float64],
+ x: NDArray[float64],
+ tau_fine: TimeConstants,
+ tau: TimeConstants,
+ epsilon: float,
+ rbf_type: str,
+ timeout: int,
+ prog: Progress,
+) -> Tuple[Gammas, Gammas, Gammas]:
# Calculation of credible interval according to Bayesian statistics
mu = mu[num_RL:]
Sigma_inv = Sigma_inv[num_RL:, num_RL:]
+
# Cholesky transform instead of direct inverse
L_Sigma_inv: NDArray[float64] = cholesky(Sigma_inv)
- L_Sigma_agm: NDArray[float64] = invert(L_Sigma_inv)
+ L_Sigma_agm: NDArray[float64] = inv(L_Sigma_inv)
Sigma: NDArray[float64] = L_Sigma_agm.T @ L_Sigma_agm
+
+ def callback(
+ total_duration: float,
+ sample_duration: float,
+ num_samples_collected: int,
+ ):
+ # print(f"{duration=}")
+ if timeout > 0 and total_duration >= timeout:
+ raise DRTError(
+ "Timed out while calculating credible intervals! Adjust the timeout limit and try again."
+ )
+
+ status: str = f"{num_samples_collected}/{num_samples} samples ("
+
+ seconds: int = int(
+ ceil(
+ total_duration
+ / num_samples_collected
+ * (num_samples - num_samples_collected)
+ )
+ )
+ minutes: int = seconds // 60
+ status += f"~{minutes if minutes > 0 else seconds} {'min' if minutes > 0 else 's'} remaining"
+
+ if timeout > 0:
+ seconds = int(ceil(timeout - total_duration))
+ minutes = seconds // 60
+ status += f", timing out in ~{minutes if minutes > 0 else seconds} {'min' if minutes > 0 else 's'}"
+
+ status += ")"
+
+ force: bool = sample_duration > 1.0
+ prog.set_message(status, force=force)
+ prog.increment(force=force)
+
# Using generate_tmg from HMC_exact.py to sample the truncated Gaussian distribution
Xs: NDArray[float64] = _generate_truncated_multivariate_gaussians(
eye(x.shape[0], dtype=float64),
@@ -457,7 +531,9 @@ def _calculate_credible_intervals(args) -> Tuple[Gammas, Gammas, Gammas]:
x,
True,
num_samples,
+ callback,
)
+
lower_bound: Gammas
upper_bound: Gammas
# map array to gamma
@@ -482,6 +558,7 @@ def _calculate_credible_intervals(args) -> Tuple[Gammas, Gammas, Gammas]:
epsilon,
rbf_type,
)
+
return (
mean_gamma,
lower_bound,
@@ -506,7 +583,11 @@ def _rbf_epsilon_functions(func: Callable) -> Callable:
"cauchy": lambda x: 1 / (1 + abs(x)) - 0.5,
"piecewise-linear": lambda x: 0.0,
}
- assert set(_RBF_TYPES) == set(switch.keys())
+
+ if set(_RBF_TYPES) != set(switch.keys()):
+ raise KeyError(
+ f"Expected the switch keys ({switch.keys()=}) to match the RBF types ({_RBF_TYPES})"
+ )
def wrapper(*args, **kwargs):
kwargs["rbf_functions"] = switch
@@ -525,12 +606,14 @@ def _compute_epsilon(
) -> float:
if rbf_type == "piecewise-linear":
return 0.0
+
elif rbf_shape == "fwhm":
from scipy.optimize import fsolve
FWHM_coeff: NDArray[float64] = 2 * fsolve(rbf_functions[rbf_type], 1)
delta: float = mean(diff(ln(1 / f.reshape(f.shape[0]))))
return (shape_coeff * FWHM_coeff / delta)[0]
+
# "factor"
return shape_coeff
@@ -558,7 +641,11 @@ def _rbf_A_matrix_functions(func: Callable) -> Callable:
"inverse-quadric": lambda x, epsilon: 1 / sqrt(1 + (epsilon * x) ** 2),
"piecewise-linear": lambda x, epsilon: 0.0,
}
- assert set(_RBF_TYPES) == set(switch.keys())
+
+ if set(_RBF_TYPES) != set(switch.keys()):
+ raise KeyError(
+ f"Expected the switch keys ({switch.keys()=}) to match the RBF types ({_RBF_TYPES})"
+ )
def wrapper(*args, **kwargs):
kwargs["rbf_functions"] = switch
@@ -580,8 +667,9 @@ def _A_matrix_element(
alpha: float = 2 * pi * f * tau
rbf_func: Callable = rbf_functions[rbf_type]
+
integrand: Callable
- if real is True:
+ if real:
integrand = (
lambda x: 1.0 / (1.0 + (alpha**2) * exp(2.0 * x)) * rbf_func(x, epsilon)
)
@@ -591,6 +679,7 @@ def _A_matrix_element(
/ (1.0 / exp(x) + (alpha**2) * exp(x))
* rbf_func(x, epsilon)
)
+
return quad(integrand, -50, 50, epsabs=1e-9, epsrel=1e-9)[0]
@@ -607,9 +696,11 @@ def _assemble_A_matrix(args) -> NDArray[float64]:
real,
rbf_type,
) = args
+
w: NDArray[float64] = 2 * pi * f
num_freqs: int = f.shape[0]
num_taus: int = tau.shape[0]
+
A: NDArray[float64]
i: int
j: int
@@ -625,10 +716,17 @@ def _assemble_A_matrix(args) -> NDArray[float64]:
C: NDArray[float64] = zeros(num_freqs, dtype=float64)
for i in range(0, num_freqs):
C[i] = _A_matrix_element(f[i], tau[0], epsilon, real, rbf_type)
+
R: NDArray[float64] = zeros(num_taus, dtype=float64)
for j in range(0, num_taus):
R[j] = _A_matrix_element(f[0], tau[j], epsilon, real, rbf_type)
+
+ if not real:
+ C *= -1
+ R *= -1
+
A = toeplitz(C, R)
+
else:
# Use brute force
A = zeros(
@@ -638,10 +736,11 @@ def _assemble_A_matrix(args) -> NDArray[float64]:
),
dtype=float64,
)
+
for i in range(0, num_freqs):
for j in range(0, num_taus):
if rbf_type == "piecewise-linear":
- if real is True:
+ if real:
A[i, j] = (
0.5
/ (1 + (w[i] * tau[j]) ** 2)
@@ -652,7 +751,6 @@ def _assemble_A_matrix(args) -> NDArray[float64]:
)
else:
A[i, j] = (
- # -0.5
0.5
* (w[i] * tau[j])
/ (1 + (w[i] * tau[j]) ** 2)
@@ -663,7 +761,11 @@ def _assemble_A_matrix(args) -> NDArray[float64]:
)
else:
A[i, j] = _A_matrix_element(f[i], tau[j], epsilon, real, rbf_type)
- return (1 if real is True else -1) * A
+
+ if not real:
+ A *= -1
+
+ return A
def _inner_product_rbf(
@@ -674,11 +776,13 @@ def _inner_product_rbf(
rbf_type: str,
) -> float:
a: float = epsilon * ln(f_i / f_j)
+
if rbf_type == "c0-matern":
if derivative_order == 1:
return epsilon * (1 - abs(a)) * exp(-abs(a))
elif derivative_order == 2:
return epsilon**3 * (1 + abs(a)) * exp(-abs(a))
+
elif rbf_type == "c2-matern":
if derivative_order == 1:
return epsilon / 6 * (3 + 3 * abs(a) - abs(a) ** 3) * exp(-abs(a))
@@ -689,6 +793,7 @@ def _inner_product_rbf(
* (3 + 3 * abs(a) - 6 * abs(a) ** 2 + abs(a) ** 3)
* exp(-abs(a))
)
+
elif rbf_type == "c4-matern":
if derivative_order == 1:
return (
@@ -711,6 +816,7 @@ def _inner_product_rbf(
* (45 + 45 * abs(a) - 15 * abs(a) ** 3 - 5 * abs(a) ** 4 + abs(a) ** 5)
* exp(-abs(a))
)
+
elif rbf_type == "c6-matern":
if derivative_order == 1:
return (
@@ -743,6 +849,7 @@ def _inner_product_rbf(
)
* exp(-abs(a))
)
+
elif rbf_type == "cauchy":
if a == 0:
if derivative_order == 1:
@@ -771,7 +878,9 @@ def _inner_product_rbf(
1 + abs(a)
)
denominator = abs(a) ** 5 * (1 + abs(a)) * (2 + abs(a)) ** 5
+
return 8 * epsilon**3 * numerator / denominator
+
elif rbf_type == "gaussian":
if derivative_order == 1:
return -epsilon * (-1 + a**2) * exp(-(a**2 / 2)) * sqrt(pi / 2)
@@ -782,6 +891,7 @@ def _inner_product_rbf(
* exp(-(a**2 / 2))
* sqrt(pi / 2)
)
+
elif rbf_type == "inverse-quadratic":
if derivative_order == 1:
return 4 * epsilon * (4 - 3 * a**2) * pi / ((4 + a**2) ** 3)
@@ -793,6 +903,7 @@ def _inner_product_rbf(
* epsilon**3
/ ((4 + a**2) ** 5)
)
+
elif rbf_type == "inverse-quadric":
from scipy.integrate import quad
@@ -800,6 +911,7 @@ def _inner_product_rbf(
y_j: float = -ln(f_j)
rbf_i: Callable = lambda y: 1 / sqrt(1 + (epsilon * (y - y_i)) ** 2)
rbf_j: Callable = lambda y: 1 / sqrt(1 + (epsilon * (y - y_j)) ** 2)
+
delta: float
sqr_drbf_dy: Callable
if derivative_order == 1:
@@ -812,6 +924,7 @@ def _inner_product_rbf(
/ (2 * delta)
* (rbf_j(y + delta) - rbf_j(y - delta))
)
+
elif derivative_order == 2:
delta = 1e-4
sqr_drbf_dy = (
@@ -822,9 +935,15 @@ def _inner_product_rbf(
/ (delta**2)
* (rbf_j(y + delta) - 2 * rbf_j(y) + rbf_j(y - delta))
)
+ else:
+ raise NotImplementedError(f"Unsupported {derivative_order=}")
+
return quad(sqr_drbf_dy, -50, 50, epsabs=1e-9, epsrel=1e-9)[0]
- assert rbf_type not in _RBF_TYPES, f"Unsupported RBF type: {rbf_type}"
- return -1.0 # Just to satisfy mypy
+
+ if rbf_type in _RBF_TYPES:
+ raise NotImplementedError(f"Unsupported RBF type: {rbf_type}")
+
+ raise ValueError(f"Unknown/invalid RBF type {rbf_type}")
def _assemble_M_matrix(
@@ -836,6 +955,7 @@ def _assemble_M_matrix(
f: Frequencies = 1 / tau
num_freqs: int = f.shape[0]
num_taus: int = tau.shape[0]
+
M: NDArray[float64]
i: int
j: int
@@ -855,6 +975,7 @@ def _assemble_M_matrix(
derivative_order,
rbf_type,
) # TODO: Maybe use tau instead of freq (pyDRTtools comment)
+
R: NDArray[float64] = zeros(num_taus, dtype=float64)
for j in range(0, num_taus):
R[j] = _inner_product_rbf(
@@ -865,6 +986,7 @@ def _assemble_M_matrix(
rbf_type,
)
M = toeplitz(C, R)
+
elif rbf_type == "piecewise-linear":
if derivative_order == 1:
M = zeros(
@@ -878,6 +1000,7 @@ def _assemble_M_matrix(
delta_loc: float = ln((1 / f[i + 1]) / (1 / f[i]))
M[i, i] = -1 / delta_loc
M[i, i + 1] = 1 / delta_loc
+
elif derivative_order == 2:
M = zeros(
(
@@ -897,7 +1020,9 @@ def _assemble_M_matrix(
M[i, i] = 1.0 / (delta_loc**2)
M[i, i + 1] = -2.0 / (delta_loc**2)
M[i, i + 2] = 1.0 / (delta_loc**2)
+
M = M.T @ M
+
else:
# Brute force
M = zeros(
@@ -916,6 +1041,7 @@ def _assemble_M_matrix(
derivative_order,
rbf_type,
) # TODO: Maybe use tau instead of freq? See previous pyDRTtools comment.
+
return M
@@ -928,6 +1054,7 @@ def _quad_format(
H: NDArray[float64] = 2 * (A.T @ A + lambda_value * M)
H = (H.T + H) / 2
c: NDArray[float64] = -2 * b.T @ A
+
return (
H,
c,
@@ -945,39 +1072,13 @@ def _quad_format_combined(
H: NDArray[float64] = 2 * ((A_re.T @ A_re + A_im.T @ A_im) + lambda_value * M)
H = (H.T + H) / 2
c: NDArray[float64] = -2 * (b_im.T @ A_im + b_re.T @ A_re)
+
return (
H,
c,
)
-def _solve_qp_cvxpy(
- H: NDArray[float64],
- c: NDArray[float64],
-) -> NDArray[float64]:
- from cvxpy import (
- Minimize,
- Problem,
- Variable,
- quad_form,
- )
-
- N_out: int = c.shape[0]
- x: Variable = Variable(shape=N_out, value=ones(N_out, dtype=float64))
- l: NDArray[float64] = zeros(N_out, dtype=float64)
- prob: Problem = Problem(Minimize((1 / 2) * quad_form(x, H) + c @ x), [x >= l])
- prob.solve(
- # verbose=True,
- eps_abs=1e-10,
- eps_rel=1e-10,
- sigma=1.00e-08,
- max_iter=200000,
- eps_prim_inf=1e-5,
- eps_dual_inf=1e-5,
- )
- return x.value
-
-
def _solve_qp_cvxopt(
H: NDArray[float64],
c: NDArray[float64],
@@ -987,42 +1088,41 @@ def _solve_qp_cvxopt(
b: Optional[NDArray[float64]] = None,
) -> NDArray[float64]:
try:
- from kvxopt import (
+ from cvxopt import (
matrix,
solvers,
)
except ImportError:
- from cvxopt import (
+ from kvxopt import (
matrix,
solvers,
)
+
args: List[matrix] = [matrix(H), matrix(c)]
+
if G is not None:
- assert h is not None
+ if not _is_floating_array(h):
+ raise TypeError(f"Expected an NDArray[floating] instead of {h=}")
+
args.extend([matrix(G), matrix(h)])
+
if A is not None:
- assert b is not None
+ if not _is_floating_array(b):
+ raise TypeError(f"Expected an NDArray[floating] instead of {b=}")
+
args.extend([matrix(A), matrix(b)])
+
solvers.options["abstol"] = 1e-15
solvers.options["reltol"] = 1e-15
- solution: dict = solvers.qp(*args)
- if "optimal" not in solution["status"]:
- raise DRTError("Failed to find optimal solution!")
- return array(solution["x"]).reshape((H.shape[1],))
+ solution: dict = solvers.qp(
+ *args,
+ options={"show_progress": False},
+ )
+ if "optimal" not in solution["status"]:
+ raise DRTError("Failed to find optimal solution")
-def _solve_qp(
- H: NDArray[float64],
- c: NDArray[float64],
- G: Optional[NDArray[float64]] = None,
- h: Optional[NDArray[float64]] = None,
- A: Optional[NDArray[float64]] = None,
- b: Optional[NDArray[float64]] = None,
-) -> NDArray[float64]:
- try:
- return _solve_qp_cvxpy(H, c)
- except Exception:
- return _solve_qp_cvxopt(H, c, G, h, A, b)
+ return array(solution["x"]).reshape((H.shape[1],))
def _rbf_gamma_functions(func: Callable) -> Callable:
@@ -1048,7 +1148,11 @@ def _rbf_gamma_functions(func: Callable) -> Callable:
"inverse-quadric": lambda x, epsilon: 1 / sqrt(1 + (epsilon * x) ** 2),
"piecewise-linear": lambda x, epsilon: 0.0,
}
- assert set(_RBF_TYPES) == set(switch.keys())
+
+ if set(_RBF_TYPES) != set(switch.keys()):
+ raise KeyError(
+ f"Expected the switch keys ({switch.keys()=}) to match the RBF types ({_RBF_TYPES})"
+ )
def wrapper(*args, **kwargs):
kwargs["rbf_functions"] = switch
@@ -1072,8 +1176,10 @@ def _x_to_gamma(
tau,
x,
)
+
num_taus: int = tau.shape[0]
num_fine_taus: int = tau_fine.shape[0]
+
B: NDArray[float64] = zeros(
(
num_fine_taus,
@@ -1081,13 +1187,16 @@ def _x_to_gamma(
),
dtype=float64,
)
+
rbf: Callable = rbf_functions[rbf_type]
+
i: int
j: int
for i in range(0, num_fine_taus):
for j in range(0, num_taus):
delta_ln_tau = ln(tau_fine[i]) - ln(tau[j])
B[i, j] = rbf(delta_ln_tau, epsilon)
+
return (
tau_fine,
B @ x,
@@ -1100,20 +1209,13 @@ def _prepare_complex_matrices(
b_re: NDArray[float64],
b_im: NDArray[float64],
M: NDArray[float64],
- lambda_value: float,
f: Frequencies,
num_freqs: int,
num_taus: int,
inductance: bool,
-) -> Tuple[
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- int,
-]:
- num_RL: int = 2 if inductance is True else 1
+) -> Tuple[NDArray[float64], NDArray[float64], NDArray[float64],]:
+ num_RL: int = 2 if inductance else 1
+
tmp: NDArray[float64] # Used for temporary binding of matrices
tmp = A_re
A_re = zeros(
@@ -1124,7 +1226,7 @@ def _prepare_complex_matrices(
dtype=float64,
)
A_re[:, num_RL:] = tmp
- A_re[:, 1 if inductance is True else 0] = 1
+ A_re[:, 1 if inductance else 0] = 1
tmp = A_im
A_im = zeros(
@@ -1135,7 +1237,7 @@ def _prepare_complex_matrices(
dtype=float64,
)
A_im[:, num_RL:] = tmp
- if inductance is True:
+ if inductance:
A_im[:, 0] = 2 * pi * f
tmp = M
@@ -1148,22 +1250,7 @@ def _prepare_complex_matrices(
)
M[num_RL:, num_RL:] = tmp
- H, c = _quad_format_combined(
- A_re,
- A_im,
- b_re,
- b_im,
- M,
- lambda_value,
- )
- return (
- A_re,
- A_im,
- M,
- H,
- c,
- num_RL,
- )
+ return (A_re, A_im, M, num_RL)
def _prepare_real_matrices(
@@ -1172,18 +1259,11 @@ def _prepare_real_matrices(
b_re: NDArray[float64],
b_im: NDArray[float64],
M: NDArray[float64],
- lambda_value: float,
num_freqs: int,
num_taus: int,
-) -> Tuple[
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- int,
-]:
+) -> Tuple[NDArray[float64], NDArray[float64], NDArray[float64], int,]:
num_RL: int = 1
+
tmp: NDArray[float64] # Used for temporary binding of matrices
tmp = A_re
A_re = zeros(
@@ -1216,18 +1296,10 @@ def _prepare_real_matrices(
)
M[num_RL:, num_RL:] = tmp
- H, c = _quad_format(
- A_re,
- b_re,
- M,
- lambda_value,
- )
return (
A_re,
A_im,
M,
- H,
- c,
num_RL,
)
@@ -1238,20 +1310,13 @@ def _prepare_imaginary_matrices(
b_re: NDArray[float64],
b_im: NDArray[float64],
M: NDArray[float64],
- lambda_value: float,
f: Frequencies,
num_freqs: int,
num_taus: int,
inductance: bool,
-) -> Tuple[
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- int,
-]:
- num_RL: int = 1 if inductance is True else 0
+) -> Tuple[NDArray[float64], NDArray[float64], NDArray[float64], int,]:
+ num_RL: int = 1 if inductance else 0
+
tmp: NDArray[float64] # Used for temporary binding of matrices
tmp = A_re
A_re = zeros(
@@ -1272,8 +1337,9 @@ def _prepare_imaginary_matrices(
dtype=float64,
)
A_im[:, num_RL:] = tmp
- if inductance is True:
+ if inductance:
A_im[:, 0] = 2 * pi * f
+
tmp = M
M = zeros(
(
@@ -1284,232 +1350,316 @@ def _prepare_imaginary_matrices(
)
M[num_RL:, num_RL:] = tmp
- H, c = _quad_format(
- A_im,
- b_im,
- M,
- lambda_value,
- )
return (
A_re,
A_im,
M,
- H,
- c,
num_RL,
)
-def _lambda_process(
- args,
-) -> Optional[
- Tuple[
- float,
- float,
- NDArray[float64],
- ComplexImpedances,
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- int,
- ]
-]:
- A_re: NDArray[float64]
- A_im: NDArray[float64]
- Z: ComplexImpedances
- M: NDArray[float64]
- lambda_value: float
- f: Frequencies
- tau: TimeConstants
- tau_fine: TimeConstants
- epsilon: float
- mode: str
- rbf_type: str
- inductance: bool
- maximum_symmetry: float
- (
- A_re,
- A_im,
- Z,
- M,
- lambda_value,
- f,
- tau,
- tau_fine,
- epsilon,
- mode,
- rbf_type,
- inductance,
- maximum_symmetry,
- ) = args
- num_freqs: int = f.size
- num_taus: int = tau.size
- num_RL: int # The number of R and/or L elements in series
- H: NDArray[float64]
- c: NDArray[float64]
- if mode == "complex":
- A_re, A_im, M, H, c, num_RL = _prepare_complex_matrices(
- A_re,
- A_im,
- Z.real,
- Z.imag,
- M,
- lambda_value,
- f,
- num_freqs,
- num_taus,
- inductance,
- )
- elif mode == "real":
- A_re, A_im, M, H, c, num_RL = _prepare_real_matrices(
- A_re,
- A_im,
- Z.real,
- Z.imag,
- M,
- lambda_value,
- num_freqs,
- num_taus,
- )
- elif mode == "imaginary":
- A_re, A_im, M, H, c, num_RL = _prepare_imaginary_matrices(
- A_re,
- A_im,
- Z.real,
- Z.imag,
- M,
- lambda_value,
- f,
- num_freqs,
- num_taus,
- inductance,
- )
+def _attempt_importing_solver():
try:
- x: NDArray[float64] = _solve_qp(H, c)
- except Exception:
- return None
- gamma: Gammas
- _, gamma = _x_to_gamma(x[num_RL:], tau_fine, tau, epsilon, rbf_type)
- min_gamma: float = abs(min(gamma))
- max_gamma: float = abs(max(gamma))
- score: float = 1.0 - ((max_gamma - min_gamma) / max(min_gamma, max_gamma))
- if score > maximum_symmetry:
- return None
- Z_fit: ComplexImpedances = array(
- list(map(lambda _: complex(_[0], _[1]), zip(A_re @ x, A_im @ x))),
- dtype=ComplexImpedance,
+ import cvxopt
+ except ImportError:
+ import kvxopt
+
+
+def _gcv_wrapper(func: Callable) -> Callable:
+ def wrapper(
+ ln_lambda: float64,
+ A_re: NDArray[float64],
+ A_im: NDArray[float64],
+ Z_re: NDArray[float64],
+ Z_im: NDArray[float64],
+ M: NDArray[float64],
+ ):
+ lambda_value: float64 = exp(ln_lambda)
+
+ # See eq. 5 in https://doi.org/10.1149/1945-7111/acbca4
+ A: NDArray[float64] = concatenate((A_re, A_im), axis=0)
+ Z: NDArray[float64] = concatenate((Z_re, Z_im), axis=0)
+
+ # See eq. 13 in https://doi.org/10.1149/1945-7111/acbca4
+ A_agm: NDArray[float64] = A.T @ A + lambda_value * M
+
+ if not _is_positive_definite(A_agm):
+ A_agm = _nearest_positive_definite(A_agm)
+
+ # Cholesky transform to invert A_agm
+ L_agm: NDArray[float64] = cholesky(A_agm)
+ inv_L_agm: NDArray[float64] = inv(L_agm)
+
+ # Inverse of A_agm
+ # See eq. 13 in https://doi.org/10.1149/1945-7111/acbca4
+ inv_A_agm: NDArray[float64] = inv_L_agm.T @ inv_L_agm
+ A_GCV: NDArray[float64] = A @ inv_A_agm @ A.T
+
+ return func(
+ M=Z_re.shape[0],
+ I=eye(2 * Z_re.shape[0]),
+ K=A_GCV,
+ Z_exp=Z,
+ )
+
+ return wrapper
+
+
+@_gcv_wrapper
+def _compute_generalized_cross_validation(
+ M: int,
+ I: NDArray[float64],
+ K: NDArray[float64],
+ Z_exp: NDArray[float64],
+) -> float64:
+ """
+ This function computes the score for the generalized cross-validation (GCV) approach.
+
+ Reference: G. Wahba, A comparison of GCV and GML for choosing the smoothing parameter in the generalized spline smoothing problem, Ann. Statist. 13 (1985) 1378–1402.
+ """
+ # See eq. 13 in https://doi.org/10.1149/1945-7111/acbca4
+ num: float64 = (norm((I - K) @ Z_exp) ** 2) / (2 * M)
+ den: float64 = (trace(I - K) / (2 * M)) ** 2
+ score: float64 = num / den
+
+ return score
+
+
+@_gcv_wrapper
+def _compute_modified_gcv(
+ M: int,
+ I: NDArray[float64],
+ K: NDArray[float64],
+ Z_exp: NDArray[float64],
+) -> float64:
+ """
+ This function computes the score for the modified generalized cross validation (mGCV) approach.
+
+ Reference: Y.J. Kim, C. Gu, Smoothing spline Gaussian regression: More scalable computation via efficient approximation, J. Royal Statist. Soc. 66 (2004) 337–356.
+ """
+ # the stabilization parameter, rho, is computed as described by Kim et al.
+ # See eq. 15 in https://doi.org/10.1149/1945-7111/acbca4
+ rho: float = 2.0 if M >= 50 else 1.3
+
+ # See eq. 14 in https://doi.org/10.1149/1945-7111/acbca4
+ num: float64 = (norm((I - K) @ Z_exp) ** 2) / (2 * M)
+ den: float64 = (trace(I - rho * K) / (2 * M)) ** 2
+ score: float64 = num / den
+
+ return score
+
+
+@_gcv_wrapper
+def _compute_robust_gcv(
+ M: int,
+ I: NDArray[float64],
+ K: NDArray[float64],
+ Z_exp: NDArray[float64],
+) -> float64:
+ """
+ This function computes the score for the robust generalized cross-validation (rGCV) approach.
+
+ Reference: M. A. Lukas, F. R. de Hoog, R. S. Anderssen, Practical use of robust GCV and modified GCV for spline smoothing, Comput. Statist. 31 (2016) 269–289.
+ """
+ # See eq. 13 in https://doi.org/10.1149/1945-7111/acbca4
+ num: float64 = (norm((I - K) @ Z_exp) ** 2) / (2 * M)
+ den: float64 = (trace(I - K) / (2 * M)) ** 2
+ gcv_score: float64 = num / den
+
+ # The robust parameter, xsi, is computed as described in Lukas et al.
+ # See eq. 16 in https://doi.org/10.1149/1945-7111/acbca4
+ xi: float = 0.3 if M >= 50 else 0.2
+ mu_2: float64 = trace(K.T @ K) / (2 * M)
+ score: float = (xi + (1 - xi) * mu_2) * gcv_score
+
+ return score
+
+
+# TODO: This seems to be giving different answers compared to pyDRTtools
+# for some reason.
+def _compute_re_im_cross_validation(
+ ln_lambda: float64,
+ A_re: NDArray[float64],
+ A_im: NDArray[float64],
+ Z_re: NDArray[float64],
+ Z_im: NDArray[float64],
+ M: NDArray[float64],
+) -> float64:
+ """
+ This function computes the score for real-imaginary discrepancy (re-im).
+ Inputs:
+ ln_lambda: regularization parameter
+ A_re: discretization matrix for the real part of the impedance
+        A_im: discretization matrix for the imaginary part of the impedance
+ Z_re: vector of the real parts of the impedance
+ Z_im: vector of the imaginary parts of the impedance
+ M: differentiation matrix
+ """
+ lambda_value: float64 = exp(ln_lambda)
+
+    # Non-negativity constraint on the DRT gamma
+ # + 1 if a resistor or an inductor is included in the DRT model
+ h: NDArray[float64] = zeros([Z_re.shape[0] + 1])
+ G: NDArray[float64] = -eye(h.shape[0])
+
+ # quadratic programming through cvxopt
+ H_re: NDArray[float64]
+ c_re: NDArray[float64]
+ gamma_ridge_re: NDArray[float64]
+ H_re, c_re = _quad_format(A_re, Z_re, M, lambda_value)
+ gamma_ridge_re = _solve_qp_cvxopt(H_re, c_re, G=G, h=h)
+
+ H_im: NDArray[float64]
+ c_im: NDArray[float64]
+ gamma_ridge_im: NDArray[float64]
+ H_im, c_im = _quad_format(A_im, Z_im, M, lambda_value)
+ gamma_ridge_im = _solve_qp_cvxopt(H_im, c_im, G=G, h=h)
+
+    # stacking the resistance R and inductance L on top of gamma_ridge_im and gamma_ridge_re, respectively
+ gamma_ridge_re_cv: NDArray[float64] = concatenate(
+ (array([0, gamma_ridge_re[1]]), gamma_ridge_im[2:])
)
- return (
- lambda_value,
- sqrt(array_sum(gamma**2)),
- x,
- Z_fit,
- A_re,
- A_im,
- M,
- H,
- c,
- num_RL,
+ gamma_ridge_im_cv: NDArray[float64] = concatenate(
+ (array([gamma_ridge_im[0], 0]), gamma_ridge_re[2:])
)
+ # See eq. 13 in https://doi.org/10.1016/j.electacta.2014.09.058
+ # or eq. (17) in https://doi.org/10.1149/1945-7111/acbca4
+ re_im_cv_score: float64 = (
+ norm(Z_re - A_re @ gamma_ridge_re_cv) ** 2
+ + norm(Z_im - A_im @ gamma_ridge_im_cv) ** 2
+ )
-def _suggest_lambda(
- lambda_values: NDArray[float64],
- solution_norms: NDArray[float64],
-) -> float:
- a: NDArray[float64] = zeros(lambda_values.size - 1, dtype=float64)
- for i in range(0, lambda_values.size - 1):
- a[i] = solution_norms[i] - solution_norms[i + 1]
- b: NDArray[float64] = zeros(a.size - 1, dtype=float64)
- for i in range(0, b.size - 1):
- b[i] = a[i] - a[i + 1]
- c: float
- for i, c in reversed(list(enumerate(b))):
- if c < 0.0:
- return lambda_values[(i + 1) if i < lambda_values.size - 2 else i]
- return lambda_values[-1]
+ return re_im_cv_score
-def _attempt_importing_solver():
- try:
- import cvxpy
- except ImportError:
- try:
- import kvxopt
- except ImportError:
- import cvxopt
+# TODO: Refactor and add type hints
+def _compute_L_curve(
+ ln_lambda: float64,
+ A_re: NDArray[float64],
+ A_im: NDArray[float64],
+ Z_re: NDArray[float64],
+ Z_im: NDArray[float64],
+ M: NDArray[float64],
+) -> float64:
+ """
+ This function computes the score for L curve (LC)
+
+ Reference: P.C. Hansen, D.P. O’Leary, The use of the L-curve in the regularization of discrete ill-posed problems, SIAM J. Sci. Comput. 14 (1993) 1487–1503.
+ """
+ lambda_value = exp(ln_lambda)
-def _l_curve_P(
- lambda_value: float,
+ A = concatenate(
+ (A_re, A_im), axis=0
+ ) # matrix A with A_re and A_im; # see (5) in [4]
+ Z = concatenate((Z_re, Z_im), axis=0) # stacked impedance
+
+ # numerator eta_num of the first derivative of eta = log(||Z_exp - Ax||^2)
+ A_agm = A.T @ A + lambda_value * M # see (13) in [4]
+ if not _is_positive_definite(A_agm):
+ A_agm = _nearest_positive_definite(A_agm)
+
+ L_agm = cholesky(A_agm) # Cholesky transform to inverse A_agm
+ inv_L_agm = inv(L_agm)
+ inv_A_agm = inv_L_agm.T @ inv_L_agm # inverse of A_agm
+ A_LC = A @ ((inv_A_agm.T @ inv_A_agm) @ inv_A_agm) @ A.T
+ eta_num = Z.T @ A_LC @ Z
+
+ # denominator eta_denom of the first derivative of eta
+ A_agm_d = A @ A.T + lambda_value * eye(A.shape[0])
+ if not _is_positive_definite(A_agm_d):
+ A_agm = _nearest_positive_definite(A_agm_d)
+
+ L_agm_d = cholesky(A_agm_d) # Cholesky transform to inverse A_agm_d
+ inv_L_agm_d = inv(L_agm_d)
+ inv_A_agm_d = inv_L_agm_d.T @ inv_L_agm_d
+ eta_denom = lambda_value * Z.T @ (inv_A_agm_d.T @ inv_A_agm_d) @ Z
+
+ # derivative of eta
+ eta_prime = eta_num / eta_denom
+
+ # numerator theta_num of the first derivative of theta = log(lambda*||Lx||^2)
+ theta_num = eta_num
+
+ # denominator theta_denom of the first derivative of theta
+ A_LC_d = A @ (inv_A_agm.T @ inv_A_agm) @ A.T
+ theta_denom = Z.T @ A_LC_d @ Z
+
+ # derivative of theta
+ theta_prime = -(theta_num) / theta_denom
+
+ # numerator LC_num of the LC score in (19) in [4]
+ a_sq = (eta_num / (eta_denom * theta_denom)) ** 2
+ p = (Z.T @ (inv_A_agm_d.T @ inv_A_agm_d) @ Z) * theta_denom
+ m = (
+ 2 * lambda_value * Z.T @ ((inv_A_agm_d.T @ inv_A_agm_d) @ inv_A_agm_d) @ Z
+ ) * theta_denom
+ q = (2 * lambda_value * Z.T @ (inv_A_agm_d.T @ inv_A_agm_d) @ Z) * eta_num
+ LC_num = a_sq * (p + m - q)
+
+ # denominator LC_denom of the LC score
+ LC_denom = ((eta_prime) ** 2 + (theta_prime) ** 2) ** (3 / 2)
+
+ # LC score ; see (19) in [4]
+ LC_score = LC_num / LC_denom
+
+ return -LC_score
+
+
+_CROSS_VALIDATION_METHODS: Dict[str, Callable] = {
+ "gcv": _compute_generalized_cross_validation, # Generalized cross-validation
+ "mgcv": _compute_modified_gcv, # Modified GCV
+ "rgcv": _compute_robust_gcv, # Robust GCV
+ "re-im": _compute_re_im_cross_validation, # Real-imaginary cross-validation
+ # "kf": _compute_, # k-fold GCV # TODO: Implement? Requires scikit-learn
+ "lc": _compute_L_curve, # L-curve
+}
+
+
+def _pick_lambda(
A_re: NDArray[float64],
A_im: NDArray[float64],
- Z_exp: ComplexImpedances,
+ Z_re: NDArray[float64],
+ Z_im: NDArray[float64],
M: NDArray[float64],
- f: Frequencies,
- tau: TimeConstants,
- tau_fine: TimeConstants,
- epsilon: float,
- mode: str,
- rbf_type: str,
- inductance: bool,
- maximum_symmetry: float,
-) -> Tuple[float64, float64]:
- result: Optional[
- Tuple[
- float,
- float,
- NDArray[float64],
- ComplexImpedances,
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- int,
- ]
- ] = _lambda_process(
- (
- A_re,
- A_im,
- Z_exp,
- M,
- lambda_value,
- f,
- tau,
- tau_fine,
- epsilon,
- mode,
- rbf_type,
- inductance,
- maximum_symmetry,
- )
+ lambda_0: float,
+ method: str,
+) -> float:
+ from scipy.optimize import (
+ OptimizeResult,
+ minimize,
)
- if result is None:
- return (inf, inf)
- assert result is not None
- return (
- log(norm(result[3] - Z_exp)**2),
- log(result[1] ** 2),
+
+ result: OptimizeResult = minimize(
+ _CROSS_VALIDATION_METHODS[method],
+ ln(lambda_0),
+ args=(A_re, A_im, Z_re, Z_im, M),
+ method="SLSQP",
+ bounds=[(ln(1e-7), ln(1e0))],
+ options={
+ "disp": False,
+ "maxiter": 2000,
+ },
)
+ return float(exp(result.x)[0])
+
def calculate_drt_tr_rbf(
data: DataSet,
mode: str = "complex",
lambda_value: float = -1.0,
+ cross_validation: str = "mgcv",
rbf_type: str = "gaussian",
derivative_order: int = 1,
rbf_shape: str = "fwhm",
shape_coeff: float = 0.5,
inductance: bool = False,
credible_intervals: bool = False,
- num_samples: int = 10000,
- maximum_symmetry: float = 0.3,
+ num_samples: int = 2000,
timeout: int = 60,
- num_procs: int = 0,
+ num_procs: int = -1,
**kwargs,
) -> TRRBFResult:
"""
@@ -1520,6 +1670,7 @@ def calculate_drt_tr_rbf(
- Wan, T. H., Saccoccio, M., Chen, C., and Ciucci, F., 2015, Electrochim. Acta, 184, 483-499 (https://doi.org/10.1016/j.electacta.2015.09.097)
- Ciucci, F. and Chen, C., 2015, Electrochim. Acta, 167, 439-454 (https://doi.org/10.1016/j.electacta.2015.03.123)
- Effat, M. B. and Ciucci, F., 2017, Electrochim. Acta, 247, 1117-1129 (https://doi.org/10.1016/j.electacta.2017.07.050)
+ - Maradesa, A., Py, B., Wan, T.H., Effat, M.B., and Ciucci F., 2023, J. Electrochem. Soc, 170, 030502 (https://doi.org/10.1149/1945-7111/acbca4)
Parameters
----------
@@ -1536,9 +1687,21 @@ def calculate_drt_tr_rbf(
lambda_value: float, optional
The Tikhonov regularization parameter.
- If the value is equal to or less than zero, then an attempt will be made to automatically find a suitable value.
- If the value is between -1.5 and 0.0, then a custom approach is used.
- If the value is less than -1.5, then the L-curve corner search algorithm (DOI:10.1088/2633-1357/abad0d) is used.
+ If ``cross_validation=""``, then the provided ``lambda_value`` is used directly.
+ Otherwise, the chosen cross-validation method is used to pick a suitable value and the provided ``lambda_value`` is simply used as the initial value.
+ If ``lambda_value`` is equal to or less than zero, and a cross-validation method has been chosen, then ``lambda_value`` is set to 1e-3.
+
+ cross_validation: str, optional
+ The lambda value can be optimized using one of several cross-validation methods.
+ Valid values include:
+
+ - "gcv" - generalized cross-validation (GCV)
+ - "mgcv" - modified GCV
+ - "rgcv" - robust GCV
+ - "re-im" - real-imaginary cross-validation
+ - "lc" - L-curve
+
+ An empty string (i.e., ``cross_validation=""``) forces ``lambda_value`` to be used directly.
rbf_type: str, optional
The type of function to use for discretization.
@@ -1577,11 +1740,6 @@ def calculate_drt_tr_rbf(
The number of samples drawn when calculating the Bayesian credible intervals.
A greater number provides better accuracy but requires more time.
- maximum_symmetry: float, optional
- A maximum limit (between 0.0 and 1.0) for the relative vertical symmetry of the DRT.
- A high degree of symmetry is common for results where the gamma value oscillates wildly (e.g., due to a small regularization parameter).
- The TR-RBF method only uses this limit when the regularization parameter (lambda) is not provided.
-
timeout: int, optional
The number of seconds to wait for the calculation of credible intervals to complete.
@@ -1594,10 +1752,9 @@ def calculate_drt_tr_rbf(
-------
TRRBFResult
"""
- from scipy.linalg import solve as solve_linalg
-
global _SOLVER_IMPORTED
- if _SOLVER_IMPORTED is False:
+
+ if not _SOLVER_IMPORTED:
try:
_attempt_importing_solver()
except ImportError:
@@ -1605,71 +1762,96 @@ def calculate_drt_tr_rbf(
else:
_SOLVER_IMPORTED = True
- assert hasattr(data, "get_frequencies") and callable(data.get_frequencies)
- assert hasattr(data, "get_impedances") and callable(data.get_impedances)
- assert isinstance(mode, str), mode
- if mode not in _MODES:
- raise DRTError("Valid mode values: '" + "', '".join(_MODES))
- assert issubdtype(type(lambda_value), floating), lambda_value
- assert type(rbf_type) is str, rbf_type
- if rbf_type not in _RBF_TYPES:
- raise DRTError("Valid rbf_type values: '" + "', '".join(_RBF_TYPES))
- assert issubdtype(type(derivative_order), integer), derivative_order
- if not (1 <= derivative_order <= 2):
- raise DRTError("Valid derivative_order values: 1, 2")
- assert type(rbf_shape) is str, rbf_shape
- if rbf_shape not in _RBF_SHAPES:
- raise DRTError("Valid rbf_shape values: '" + "', '".join(_RBF_SHAPES))
- assert issubdtype(type(shape_coeff), floating), shape_coeff
- if shape_coeff <= 0.0:
- raise DRTError("The shape coefficient must be greater than 0.0!")
- assert isinstance(inductance, bool), inductance
- assert isinstance(credible_intervals, bool), credible_intervals
- assert issubdtype(type(num_samples), integer), num_samples
- if credible_intervals is True and num_samples < 1000:
- raise DRTError(f"{num_samples} is not enough samples!")
- assert issubdtype(type(maximum_symmetry), floating), maximum_symmetry
- if not (0.0 <= maximum_symmetry <= 1.0):
- raise DRTError("The maximum symmetry must be between 0.0 and 1.0 (inclusive)!")
- assert issubdtype(type(timeout), integer), timeout
- if credible_intervals is True and timeout < 1:
- raise DRTError("The timeout must be greater than 0!")
- assert issubdtype(type(num_procs), integer), num_procs
- if num_procs < 1:
- num_procs = _get_default_num_procs() - abs(num_procs)
- if num_procs < 1:
- num_procs = 1
- # TODO: Switch over to using the new cross-validation-based method(s)?
- min_log_lambda: float = -7.0
- max_log_lambda: float = 0.0
- lambda_values: NDArray[float64] = (
- logspace(
- min_log_lambda,
- max_log_lambda,
- num=round(max_log_lambda - min_log_lambda) + 1,
+ if not isinstance(mode, str):
+ raise TypeError(f"Expected a string instead of {mode=}")
+ elif mode not in _MODES:
+ raise ValueError("Valid mode values: '" + "', '".join(_MODES))
+
+ if not isinstance(cross_validation, str):
+        raise TypeError(f"Expected a string instead of {cross_validation=}")
+ elif not (cross_validation == "" or cross_validation in _CROSS_VALIDATION_METHODS):
+ raise ValueError(
+ "Valid cross-validation methods include:\n- "
+ + "\n- ".join(_CROSS_VALIDATION_METHODS.keys())
)
- if -1.5 <= lambda_value <= 0.0
- else array([lambda_value])
- )
+ elif cross_validation != "" and not (1e-7 < lambda_value < 1.0):
+ if lambda_value <= 0.0:
+ lambda_value = 1e-3
+ else:
+ # These are the bounds that are currently used by the _pick_lambda function.
+ raise ValueError(f"Expected 1e-7 < {lambda_value=} < 1.0")
+
+ if not _is_floating(lambda_value):
+ raise TypeError(f"Expected a float instead of {lambda_value=}")
+ elif not lambda_value > 0.0:
+ raise ValueError(
+ f"Expected a value greater than zero instead of {lambda_value=}"
+ )
+
+ if not isinstance(rbf_type, str):
+ raise TypeError(f"Expected a string instead of {rbf_type}")
+ elif rbf_type not in _RBF_TYPES:
+ raise ValueError("Valid rbf_type values: '" + "', '".join(_RBF_TYPES))
+
+ if not _is_integer(derivative_order):
+ raise TypeError(f"Expected an integer instead of {derivative_order=}")
+ elif not (1 <= derivative_order <= 2):
+ raise ValueError("Valid derivative_order values: 1, 2")
+
+ if not isinstance(rbf_shape, str):
+ raise TypeError(f"Expected a string instead of {rbf_shape=}")
+ elif rbf_shape not in _RBF_SHAPES:
+ raise ValueError("Valid rbf_shape values: '" + "', '".join(_RBF_SHAPES))
+
+ if not _is_floating(shape_coeff):
+ raise TypeError(f"Expected a float instead of {shape_coeff=}")
+ elif shape_coeff <= 0.0:
+ raise ValueError("The shape coefficient must be greater than 0.0")
+
+ if not _is_boolean(inductance):
+ raise TypeError(f"Expected a boolean instead of {inductance=}")
+
+ if not _is_boolean(credible_intervals):
+ raise TypeError(f"Expected a boolean instead of {credible_intervals=}")
+
+ if not _is_integer(num_samples):
+ raise TypeError(f"Expected an integer instead of {num_samples=}")
+ elif credible_intervals and num_samples < 1000:
+ raise ValueError("The number of samples must be greater than or equal to 1000")
+
+ if not _is_integer(timeout):
+ raise TypeError(f"Expected an integer instead of {timeout=}")
+
+ if not _is_integer(num_procs):
+ raise TypeError(f"Expected an integer instead of {num_procs=}")
+ elif num_procs < 1:
+ num_procs = max((_get_default_num_procs() - abs(num_procs), 1))
+
# TODO: Figure out if f and Z need to be altered depending on the value
# of the 'inductance' argument!
f: Frequencies = data.get_frequencies()
+ if len(f) < 1:
+ raise ValueError(
+ f"There are no unmasked data points in the '{data.get_label()}' data set parsed from '{data.get_path()}'"
+ )
+
Z_exp: ComplexImpedances = data.get_impedances()
+
tau: TimeConstants = 1 / f
tau_fine: TimeConstants = logspace(
log(tau.min()) - 0.5, log(tau.max()) + 0.5, 10 * f.shape[0]
)
num_freqs: int = f.size
+ num_taus: int = tau.size
epsilon: float = _compute_epsilon(f, rbf_shape, shape_coeff, rbf_type)
+
num_steps: int = 0
num_steps += 3 # A_re, A_im, and M matrices
- num_steps += len(lambda_values)
- if credible_intervals is True:
- num_steps += timeout
+ if credible_intervals:
+ num_steps += num_samples
+
prog: Progress
with Progress("Preparing matrices", total=num_steps + 1) as prog:
- A_re: NDArray[float64]
- A_im: NDArray[float64]
i: int
args = [
(
@@ -1687,6 +1869,9 @@ def calculate_drt_tr_rbf(
rbf_type,
),
]
+
+ A_re: NDArray[float64]
+ A_im: NDArray[float64]
if num_procs > 1:
with Pool(2) as pool:
for i, res in enumerate(pool.imap(_assemble_A_matrix, args)):
@@ -1700,97 +1885,124 @@ def calculate_drt_tr_rbf(
prog.increment()
A_im = _assemble_A_matrix(args[1])
prog.increment()
+
M: NDArray[float64] = _assemble_M_matrix(
tau,
epsilon,
derivative_order,
rbf_type,
)
- prog.increment()
- if len(lambda_values) == 1 and lambda_values[0] < -1.5:
- lambda_values[0] = _l_curve_corner_search(
- lambda _: _l_curve_P(
- _,
- A_re,
- A_im,
- Z_exp,
- M,
- f,
- tau,
- tau_fine,
- epsilon,
- mode,
- rbf_type,
- inductance,
- maximum_symmetry,
- ),
- minimum=1e-10,
- maximum=1,
+
+ b_re: NDArray[float64] = Z_exp.real
+ b_im: NDArray[float64] = Z_exp.imag
+
+ num_RL: int = -1
+ if mode == "complex":
+ A_re, A_im, M, num_RL = _prepare_complex_matrices(
+ A_re,
+ A_im,
+ b_re,
+ b_im,
+ M,
+ f,
+ num_freqs,
+ num_taus,
+ inductance,
)
- args = (
- (
+ elif mode == "real":
+ A_re, A_im, M, num_RL = _prepare_real_matrices(
A_re,
A_im,
- Z_exp,
+ b_re,
+ b_im,
+ M,
+ num_freqs,
+ num_taus,
+ )
+ elif mode == "imaginary":
+ A_re, A_im, M, num_RL = _prepare_imaginary_matrices(
+ A_re,
+ A_im,
+ b_re,
+ b_im,
M,
- lambda_value,
f,
- tau,
- tau_fine,
- epsilon,
- mode,
- rbf_type,
+ num_freqs,
+ num_taus,
inductance,
- maximum_symmetry if lambda_values.size > 1 else 1.0,
)
- for lambda_value in lambda_values
- )
+
+ if cross_validation != "":
+ prog.set_message("Picking lambda value")
+ lambda_value = _pick_lambda(
+ A_re,
+ A_im,
+ b_re,
+ b_im,
+ M,
+ lambda_value,
+ cross_validation,
+ )
+
+ prog.increment()
prog.set_message("Calculating DRT")
- results: List[
- Tuple[
- float,
- float,
- NDArray[float64],
- ComplexImpedances,
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- NDArray[float64],
- int,
- ]
- ] = []
- if len(lambda_values) > 1 and num_procs > 1:
- with Pool(num_procs) as pool:
- for res in pool.imap_unordered(_lambda_process, args):
- if res is not None:
- results.append(res)
- prog.increment()
- else:
- for res in map(_lambda_process, args):
- if res is not None:
- results.append(res)
- prog.increment()
- if len(results) == 0:
- raise DRTError("Failed to perform calculations! Try tweaking the settings.")
- if len(results) > 1:
- results.sort(key=lambda _: _[0])
- lambda_value = _suggest_lambda(
- array(list(map(lambda _: _[0], results)), dtype=float64),
- array(list(map(lambda _: _[1], results)), dtype=float64),
+
+ H: NDArray[float64]
+ c: NDArray[float64]
+ if mode == "complex":
+ H, c = _quad_format_combined(
+ A_re,
+ A_im,
+ b_re,
+ b_im,
+ M,
+ lambda_value,
+ )
+ elif mode == "real":
+ H, c = _quad_format(
+ A_re,
+ b_re,
+ M,
+ lambda_value,
+ )
+ elif mode == "imaginary":
+ H, c = _quad_format(
+ A_im,
+ b_im,
+ M,
+ lambda_value,
+ )
+
+    if not (0 <= num_RL <= 2):
+        raise ValueError(f"Expected 0 <= {num_RL=} <= 2")
+
+ # Enforce positivity constraint
+ h: NDArray[float64] = zeros(b_re.shape[0] + num_RL)
+ G: NDArray[float64] = -eye(h.shape[0])
+ x: NDArray[float64] = _solve_qp_cvxopt(
+ H,
+ c,
+ G=G,
+ h=h,
)
- results = list(filter(lambda _: _[0] == lambda_value, results))
- lambda_value, _, x, Z_fit, A_re, A_im, M, H, c, num_RL = results[0]
+
+ Z_fit: ComplexImpedances = array(
+ list(map(lambda _: complex(*_), zip(A_re @ x, A_im @ x))),
+ dtype=ComplexImpedance,
+ )
+
sigma_re_im: float
if mode == "complex":
- sigma_re_im = std(
- concatenate([Z_fit.real - Z_exp.real, Z_fit.imag - Z_exp.imag])
- )
+ sigma_re_im = std(concatenate([Z_fit.real - b_re, Z_fit.imag - b_im]))
+
elif mode == "real":
- sigma_re_im = std(Z_fit.real - Z_exp.real)
+ sigma_re_im = std(Z_fit.real - b_re)
+
elif mode == "imaginary":
- sigma_re_im = std(Z_fit.imag - Z_exp.imag)
+ sigma_re_im = std(Z_fit.imag - b_im)
+
inv_V: NDArray[float64] = 1 / sigma_re_im**2 * eye(num_freqs)
+
Sigma_inv: NDArray[float64]
mu_numerator: NDArray[float64]
if mode == "complex":
@@ -1799,19 +2011,28 @@ def calculate_drt_tr_rbf(
+ (A_im.T @ inv_V @ A_im)
+ (lambda_value / sigma_re_im**2) * M
)
- mu_numerator = A_re.T @ inv_V @ Z_exp.real + A_im.T @ inv_V @ Z_exp.imag
+ mu_numerator = A_re.T @ inv_V @ b_re + A_im.T @ inv_V @ b_im
+
elif mode == "real":
Sigma_inv = (A_re.T @ inv_V @ A_re) + (lambda_value / sigma_re_im**2) * M
- mu_numerator = A_re.T @ inv_V @ Z_exp.real
+ mu_numerator = A_re.T @ inv_V @ b_re
+
elif mode == "imaginary":
Sigma_inv = (A_im.T @ inv_V @ A_im) + (lambda_value / sigma_re_im**2) * M
- mu_numerator = A_im.T @ inv_V @ Z_exp.imag
+ mu_numerator = A_im.T @ inv_V @ b_im
+
Sigma_inv = (Sigma_inv + Sigma_inv.T) / 2
+ if not _is_positive_definite(Sigma_inv):
+ Sigma_inv = _nearest_positive_definite(Sigma_inv)
+
L_Sigma_inv: NDArray[float64] = cholesky(Sigma_inv)
- mu: NDArray[float64] = solve_linalg(
- L_Sigma_inv.T, solve_linalg(L_Sigma_inv, mu_numerator)
+ mu: NDArray[float64] = solve(
+ L_Sigma_inv.T,
+ solve(L_Sigma_inv, mu_numerator),
)
- # TODO: Why were L and R defined only to not be used?
+
+ # These L and R values are used by pyDRTtools when exporting a DRT report
+ # as a CSV file.
L: float
R: float
if num_RL == 0:
@@ -1823,12 +2044,14 @@ def calculate_drt_tr_rbf(
L, R = 0.0, x[0]
elif num_RL == 2:
L, R = x[0:2]
+
x = x[num_RL:]
time_constants: TimeConstants
time_constants, gamma = _x_to_gamma(x, tau_fine, tau, epsilon, rbf_type)
- if credible_intervals is True:
+
+ if credible_intervals:
prog.set_message("Calculating credible intervals")
- args = (
+ mean_gamma, lower_gamma, upper_gamma = _calculate_credible_intervals(
num_RL,
num_samples,
mu,
@@ -1838,32 +2061,16 @@ def calculate_drt_tr_rbf(
tau,
epsilon,
rbf_type,
+ timeout,
+ prog,
)
- pool = Pool(1)
- async_result = pool.map_async(_calculate_credible_intervals, (args,))
- while timeout > 0:
- if async_result.ready():
- prog.increment(step=timeout)
- break
- sleep(1.0)
- prog.increment()
- timeout -= 1
- try:
- (mean_gamma, lower_gamma, upper_gamma,) = async_result.get(
- timeout=2
- )[0]
- except MPTimeoutError:
- pool.close()
- raise DRTError(
- "Timed out while calculating credible intervals! Adjust the timeout limit and try again."
- )
- pool.close()
else:
mean_gamma, lower_gamma, upper_gamma = (
array([]), # Mean
array([]), # Lower bound
array([]), # Upper bound
)
+
return TRRBFResult(
time_constants=time_constants,
gammas=gamma,
diff --git a/src/pyimpspec/analysis/drt/utility.py b/src/pyimpspec/analysis/drt/utility.py
index 404b969..dace1ab 100644
--- a/src/pyimpspec/analysis/drt/utility.py
+++ b/src/pyimpspec/analysis/drt/utility.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -17,22 +17,78 @@
# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
# the LICENSES folder.
-from typing import (
- Callable,
- List,
- Tuple,
-)
from warnings import (
catch_warnings,
filterwarnings,
)
from numpy import (
+ diag,
+ dot,
+ eye,
float64,
- inf,
- sqrt,
log10 as log,
+ ndarray,
+ sqrt,
+ spacing,
+ min as array_min,
+)
+from numpy.linalg import (
+ LinAlgError,
+ cholesky,
+ svd,
+ eigvals,
+ norm,
)
from numpy.typing import NDArray
+from pyimpspec.typing.helpers import (
+ Callable,
+ List,
+ Tuple,
+)
+
+
+def _nearest_positive_definite(A: ndarray) -> ndarray:
+ """
+ Find the nearest positive definite matrix of the input matrix A.
+
+ Based on John D'Errico's "nearestSPD" (https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd).
+ Ported by the developers of pyDRTtools.
+
+ See also:
+ - N.J. Higham, "Computing a nearest symmetric positive semidefinite matrix" (1988, https://doi.org/10.1016/0024-3795(88)90223-6)
+ """
+
+ B: ndarray = (A + A.T) / 2
+ Sigma_mat: ndarray
+ V: ndarray
+ _, Sigma_mat, V = svd(B)
+
+ H: ndarray = dot(V.T, dot(diag(Sigma_mat), V))
+
+ A_nPD: ndarray = (B + H) / 2
+ A_symm: ndarray = (A_nPD + A_nPD.T) / 2
+
+ k: int = 1
+ I: ndarray = eye(A_symm.shape[0])
+
+ while not _is_positive_definite(A_symm):
+ # The MATLAB function chol accepts matrices with eigenvalue = 0,
+ # but NumPy does not so we replace the MATLAB function eps(min_eig)
+ # with the following one
+ eps: float = spacing(norm(A_symm))
+ min_eig: float = min(0.0, array_min(eigvals(A_symm).real))
+ A_symm += I * (-min_eig * k**2 + eps)
+ k += 1
+
+ return A_symm
+
+
+def _is_positive_definite(matrix: ndarray) -> bool:
+ try:
+ cholesky(matrix)
+ return True
+ except LinAlgError:
+ return False
def _l_curve_corner_search(
@@ -44,8 +100,8 @@ def _l_curve_corner_search(
"""
Implementation of algorithm 1 in DOI:10.1088/2633-1357/abad0d
- l_curve_P must be a function that takes a regularization parameter (or
- lambda value)and returns a tuple containing the base-10 log of the
+ l_curve_P must be a function that takes a regularization parameter (i.e.,
+ lambda value) and returns a tuple containing the base-10 log of the
residual norm squared and the base-10 log of the solution norm squared.
Returns
@@ -55,11 +111,15 @@ def _l_curve_corner_search(
"""
# 'Line N' comments refer to the line numbers in algorithm 1
# in DOI:10.1088/2633-1357/abad0d
+
phi = (1 + sqrt(5)) / 2 # Line 3
def update_lambda(lambdas: List[float], index: int):
- assert index == 1 or index == 2, index
+ if index not in (1, 2):
+ raise ValueError("Expected the index to be 1 or 2")
+
xs: NDArray[float64] = log(lambdas)
+
if index == 1:
lambdas[1] = 10 ** ((xs[3] + phi * xs[0]) / (1 + phi))
elif index == 2:
@@ -68,13 +128,16 @@ def update_lambda(lambdas: List[float], index: int):
def menger(Ps: List[Tuple[float64, float64]]) -> float64:
xi: List[float64] = [_[0] for _ in Ps]
eta: List[float64] = [_[1] for _ in Ps]
+
j: int = 0
k: int = 1
l: int = 2
+
with catch_warnings():
filterwarnings("ignore", "divide by zero encountered in scalar divide")
filterwarnings("ignore", "invalid value encountered in scalar divide")
filterwarnings("ignore", "invalid value encountered in scalar subtract")
+
num: float64 = 2 * (
xi[j] * eta[k]
+ xi[k] * eta[l]
@@ -83,40 +146,50 @@ def menger(Ps: List[Tuple[float64, float64]]) -> float64:
- xi[k] * eta[j]
- xi[l] * eta[k]
)
+
den: float64 = (
((xi[k] - xi[j]) ** 2 + (eta[k] - eta[j]) ** 2)
* ((xi[l] - xi[k]) ** 2 + (eta[l] - eta[k]) ** 2)
* ((xi[j] - xi[l]) ** 2 + (eta[j] - eta[l]) ** 2)
) ** (1 / 2)
+
return num / den
lambdas: List[float] = [minimum, 1.0, 1.0, maximum] # Line 1
update_lambda(lambdas, index=1) # Line 4
update_lambda(lambdas, index=2) # Line 5
+
Ps: List[Tuple[float64, float64]] = []
+
lm: float
for lm in lambdas: # Line 6
Ps.append(l_curve_P(lm)) # Line 7
+
optimal_lambda: float = -1.0
+
while (lambdas[3] - lambdas[0]) / lambdas[3] >= epsilon:
C_2: float64 = menger(Ps[:-1]) # Line 10
C_3: float64 = menger(Ps[1:]) # Line 11
+
while C_3 <= 0.0: # Line 18
lambdas[3], Ps[3] = lambdas[2], Ps[2] # Line 13
lambdas[2], Ps[2] = lambdas[1], Ps[1] # Line 14
update_lambda(lambdas, index=1) # Line 15
Ps[1] = l_curve_P(lambdas[1]) # Line 16
C_3 = menger(Ps[1:]) # Line 17
+
if C_2 > C_3: # Line 19
optimal_lambda = lambdas[1] # Line 20
lambdas[3], Ps[3] = lambdas[2], Ps[2] # Line 21
lambdas[2], Ps[2] = lambdas[1], Ps[1] # Line 22
update_lambda(lambdas, index=1) # Line 23
Ps[1] = l_curve_P(lambdas[1]) # Line 24
- else: # Line
+
+ else:
optimal_lambda = lambdas[2] # Line 26
lambdas[0], Ps[0] = lambdas[1], Ps[1] # Line 27
lambdas[1], Ps[1] = lambdas[2], Ps[2] # Line 28
update_lambda(lambdas, index=2) # Line 29
Ps[2] = l_curve_P(lambdas[2]) # Line 30
+
return optimal_lambda # Line 33
diff --git a/src/pyimpspec/analysis/fitting.py b/src/pyimpspec/analysis/fitting.py
index 706b300..f7b0d1b 100644
--- a/src/pyimpspec/analysis/fitting.py
+++ b/src/pyimpspec/analysis/fitting.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,6 +20,7 @@
from copy import deepcopy
from dataclasses import dataclass
from multiprocessing import Pool
+from multiprocessing.context import TimeoutError as MPTimeoutError
from typing import (
Callable,
Dict,
@@ -30,18 +31,18 @@
Union,
)
from traceback import format_exc
-import warnings
+from warnings import (
+ catch_warnings,
+ filterwarnings,
+)
from numpy import (
angle,
array,
float64,
inf,
- integer,
isnan,
- issubdtype,
log10 as log,
nan,
- ndarray,
ones,
)
from numpy.typing import NDArray
@@ -64,6 +65,12 @@
Phases,
Residuals,
)
+from pyimpspec.typing.helpers import (
+ _is_boolean,
+ _is_complex_array,
+ _is_floating_array,
+ _is_integer,
+)
@dataclass(frozen=True)
@@ -95,10 +102,13 @@ def __str__(self) -> str:
string: str = f"{self.value:.6e}"
if not isnan(self.stderr):
string += f" +/- {self.stderr:.6e}"
+
if self.unit != "":
string += f" {self.unit}"
+
if self.fixed:
string += " (fixed)"
+
return string
def get_value(self) -> float:
@@ -151,6 +161,7 @@ def get_relative_error(self) -> float:
"""
if isnan(self.stderr):
return self.stderr
+
return (self.stderr or 0.0) / self.value
@@ -213,6 +224,7 @@ def get_label(self) -> str:
cdc: str = self.circuit.to_string()
if cdc.startswith("[") and cdc.endswith("]"):
cdc = cdc[1:-1]
+
return cdc
def get_frequencies(self, num_per_decade: int = -1) -> Frequencies:
@@ -230,9 +242,9 @@ def get_frequencies(self, num_per_decade: int = -1) -> Frequencies:
-------
|Frequencies|
"""
- assert issubdtype(type(num_per_decade), integer), num_per_decade
if num_per_decade > 0:
return _interpolate(self.frequencies, num_per_decade)
+
return self.frequencies
def get_impedances(self, num_per_decade: int = -1) -> ComplexImpedances:
@@ -250,13 +262,14 @@ def get_impedances(self, num_per_decade: int = -1) -> ComplexImpedances:
-------
|ComplexImpedances|
"""
- assert issubdtype(type(num_per_decade), integer), num_per_decade
if num_per_decade > 0:
return self.circuit.get_impedances(self.get_frequencies(num_per_decade))
+
return self.impedances
def get_nyquist_data(
- self, num_per_decade: int = -1
+ self,
+ num_per_decade: int = -1,
) -> Tuple[Impedances, Impedances]:
"""
Get the data necessary to plot this FitResult as a Nyquist plot: the real and the negative imaginary parts of the impedances.
@@ -272,13 +285,13 @@ def get_nyquist_data(
-------
Tuple[|Impedances|, |Impedances|]
"""
- assert issubdtype(type(num_per_decade), integer), num_per_decade
if num_per_decade > 0:
Z: ComplexImpedances = self.get_impedances(num_per_decade)
return (
Z.real,
-Z.imag,
)
+
return (
self.impedances.real,
-self.impedances.imag,
@@ -302,19 +315,19 @@ def get_bode_data(
-------
Tuple[|Frequencies|, |Impedances|, |Phases|]
"""
- assert issubdtype(type(num_per_decade), integer), num_per_decade
+ f: Frequencies
+ Z: ComplexImpedances
if num_per_decade > 0:
- f: Frequencies = self.get_frequencies(num_per_decade)
- Z: ComplexImpedances = self.circuit.get_impedances(f)
- return (
- f,
- abs(Z),
- -angle(Z, deg=True),
- )
+ f = self.get_frequencies(num_per_decade)
+ Z = self.circuit.get_impedances(f)
+ else:
+ f = self.frequencies
+ Z = self.impedances
+
return (
- self.frequencies,
- abs(self.impedances),
- -angle(self.impedances, deg=True),
+ f,
+ abs(Z),
+ -angle(Z, deg=True),
)
def get_residuals_data(
@@ -342,6 +355,7 @@ def get_parameters(self) -> Dict[str, Dict[str, FittedParameter]]:
-------
Dict[str, Dict[str, FittedParameter]]
"""
+ # TODO: Deprecated or unimplemented?
def to_parameters_dataframe(
self,
@@ -360,13 +374,16 @@ def to_parameters_dataframe(
"""
from pandas import DataFrame
- assert isinstance(running, bool), running
+ if not _is_boolean(running):
+ raise TypeError(f"Expected a boolean instead of {running=}")
+
element_names: List[str] = []
parameter_labels: List[str] = []
fitted_values: List[float] = []
stderr_values: List[Optional[float]] = []
fixed: List[str] = []
units: List[str] = []
+
element_name: str
parameters: Dict[str, FittedParameter]
internal_identifiers: Dict[
@@ -375,6 +392,7 @@ def to_parameters_dataframe(
external_identifiers: Dict[
Element, int
] = self.circuit.generate_element_identifiers(running=False)
+
element: Element
for element, ident in external_identifiers.items():
element_name = self.circuit.get_element_name(
@@ -382,6 +400,7 @@ def to_parameters_dataframe(
identifiers=external_identifiers,
)
parameters = self.parameters[element_name]
+
parameter_label: str
parameter: FittedParameter
for parameter_label, parameter in parameters.items():
@@ -389,19 +408,21 @@ def to_parameters_dataframe(
self.circuit.get_element_name(
element,
identifiers=external_identifiers
- if running is False
+ if not running
else internal_identifiers,
)
)
parameter_labels.append(parameter_label)
+
fitted_values.append(parameter.value)
stderr_values.append(
parameter.stderr / parameter.value * 100
- if parameter.stderr is not None and parameter.fixed is False
+ if parameter.stderr is not None and not parameter.fixed
else nan
)
fixed.append("Yes" if parameter.fixed else "No")
units.append(parameter.unit)
+
return DataFrame.from_dict(
{
"Element": element_names,
@@ -437,6 +458,7 @@ def to_statistics_dataframe(self) -> "DataFrame": # noqa: F821
"Method": self.method,
"Weight": self.weight,
}
+
return DataFrame.from_dict(
{
"Label": list(statistics.keys()),
@@ -450,17 +472,26 @@ def _to_lmfit(
) -> "Parameters": # noqa: F821
from lmfit import Parameters
- assert isinstance(identifiers, dict), identifiers
+ if not isinstance(identifiers, dict):
+ raise TypeError(f"Expected a dictionary instead of {identifiers=}")
+
result: Parameters = Parameters()
+
ident: int
element: Element
for ident, element in identifiers.items():
lower_limits: Dict[str, float] = element.get_lower_limits()
upper_limits: Dict[str, float] = element.get_upper_limits()
fixed: Dict[str, bool] = element.are_fixed()
+
symbol: str
value: float
for symbol, value in element.get_values().items():
+ if not (lower_limits[symbol] <= value <= upper_limits[symbol]):
+ raise ValueError(
+ f"Expected {lower_limits[symbol]} <= {value} <= {upper_limits[symbol]}"
+ )
+
result.add(
f"{symbol}_{ident}",
value,
@@ -468,6 +499,7 @@ def _to_lmfit(
max=upper_limits[symbol],
vary=not fixed[symbol],
)
+
return result
@@ -477,9 +509,14 @@ def _from_lmfit(
):
from lmfit import Parameters
- assert isinstance(parameters, Parameters), parameters
- assert isinstance(identifiers, dict), identifiers
+ if not isinstance(parameters, Parameters):
+ raise TypeError(f"Expected a Parameters instead of {parameters=}")
+
+ if not isinstance(identifiers, dict):
+ raise TypeError(f"Expected a dictionary instead of {identifiers=}")
+
result: Dict[int, Dict[str, float]] = {_: {} for _ in identifiers}
+
key: str
value: float
for key, value in parameters.valuesdict().items():
@@ -488,6 +525,7 @@ def _from_lmfit(
symbol, ident = key.rsplit("_", 1) # type: ignore
ident = int(ident)
result[ident][symbol] = float(value)
+
element: Element
for ident, element in identifiers.items():
element.set_values(**result[ident])
@@ -510,6 +548,7 @@ def _residual(
],
dtype=float64,
)
+
return weight_func(Z_exp, Z_fit) * errors
@@ -534,6 +573,7 @@ def _proportional_weight(
weight: NDArray[float64] = ones(shape=(2, Z_exp.size), dtype=float64)
weight[0] = weight[0] / Z_fit.real**2
weight[1] = weight[1] / Z_fit.imag**2
+
return weight
@@ -586,8 +626,6 @@ def _extract_parameters(
) -> Dict[str, Dict[str, FittedParameter]]:
from lmfit.minimizer import MinimizerResult
- assert isinstance(circuit, Circuit), circuit
- assert isinstance(fit, MinimizerResult), fit
parameters: Dict[str, Dict[str, FittedParameter]] = {}
internal_identifiers: Dict[int, Element] = {
v: k for k, v in circuit.generate_element_identifiers(running=True).items()
@@ -595,6 +633,7 @@ def _extract_parameters(
external_identifiers: Dict[Element, int] = circuit.generate_element_identifiers(
running=False
)
+
internal_id: int
element: Element
for internal_id, element in internal_identifiers.items():
@@ -602,9 +641,13 @@ def _extract_parameters(
symbol: str = element.get_symbol()
if element_name == symbol:
element_name = f"{symbol}_{external_identifiers[element]}"
- assert element_name not in parameters, element_name
+
+ if element_name in parameters:
+ raise KeyError(f"Expected {element_name=} not to exist in {parameters=}")
+
parameters[element_name] = {}
units: Dict[str, str] = element.get_units()
+
# Parameters that were not fixed
variable_name: str
for variable_name in filter(
@@ -617,6 +660,7 @@ def _extract_parameters(
float(stderr)
except TypeError:
stderr = nan
+
variable_name, _ = variable_name.rsplit("_", 1)
parameters[element_name][variable_name] = FittedParameter(
value=par.value,
@@ -624,17 +668,20 @@ def _extract_parameters(
fixed=False,
unit=units[variable_name],
)
+
# Remaining parameters are fixed
value: float
for name, value in element.get_values().items():
if name in parameters[element_name]:
continue
+
parameters[element_name][name] = FittedParameter(
value=value,
stderr=nan,
fixed=True,
unit=units[name],
)
+
return parameters
@@ -652,21 +699,38 @@ def _fit_process(
max_nfev: int
auto: bool
circuit, f, Z_exp, method, weight, max_nfev, auto = args
- assert isinstance(circuit, Circuit), circuit
- assert isinstance(f, ndarray), f
- assert isinstance(Z_exp, ndarray), Z_exp
- assert isinstance(method, str), method
- assert isinstance(weight, str), weight
- assert issubdtype(type(max_nfev), integer), max_nfev
- assert isinstance(auto, bool), auto
+
+ if not isinstance(circuit, Circuit):
+ raise TypeError(f"Expected a Circuit instead of {circuit=}")
+
+ if not _is_floating_array(f):
+ raise TypeError(f"Expected an NDArray[float] instead of {f=}")
+
+ if not _is_complex_array(Z_exp):
+ raise TypeError(f"Expected an NDArray[complex] instead of {Z_exp=}")
+
+ if not isinstance(method, str):
+ raise TypeError(f"Expected a string instead of {method=}")
+
+ if not isinstance(weight, str):
+ raise TypeError(f"Expected a string instead of {weight=}")
+
+ if not _is_integer(max_nfev):
+ raise TypeError(f"Expected an integer instead of {max_nfev=}")
+
+ if not _is_boolean(auto):
+ raise TypeError(f"Expected a boolean instead of {auto=}")
+
weight_func: Callable = _WEIGHT_FUNCTIONS[weight]
identifiers: Dict[int, Element] = {
v: k for k, v in circuit.generate_element_identifiers(running=True).items()
}
- with warnings.catch_warnings():
+
+ with catch_warnings():
if auto:
- warnings.filterwarnings("error")
- warnings.filterwarnings("ignore", category=DeprecationWarning)
+ filterwarnings("error")
+ filterwarnings("ignore", category=DeprecationWarning)
+
try:
fit: MinimizerResult = minimize(
_residual,
@@ -681,7 +745,8 @@ def _fit_process(
),
max_nfev=None if max_nfev < 1 else max_nfev,
)
- except (Exception, Warning):
+
+ except (Exception, Warning): # TODO
return (
circuit,
inf,
@@ -690,9 +755,12 @@ def _fit_process(
weight,
format_exc(),
)
+
if fit.ndata < len(f) and log(fit.chisqr) < -50:
return (circuit, inf, None, method, weight, "Invalid result!")
+
_from_lmfit(fit.params, identifiers)
+
return (
circuit,
_calculate_pseudo_chisqr(Z_exp=Z_exp, Z_fit=circuit.get_impedances(f)),
@@ -711,19 +779,24 @@ def validate_circuit(circuit: Circuit):
----------
circuit: Circuit
"""
- assert circuit.to_string() not in ["[]", "()"], "The circuit has no elements!"
+ if circuit.to_string() in ["[]", "()"]:
+ raise ValueError("The circuit has no elements!")
+
identifiers: Dict[Element, int] = circuit.generate_element_identifiers(
running=False
)
element_names: Set[str] = set()
+
element: Element
ident: int
for element, ident in identifiers.items():
name: str = circuit.get_element_name(element, identifiers)
- assert (
- name not in element_names
- ), f"Two or more elements of the same type have the same name ({name})!"
- element_names.add(name)
+ if name in element_names:
+ raise ValueError(
+ f"Two or more elements of the same type have the same name ({name=})"
+ )
+ else:
+ element_names.add(name)
def fit_circuit(
@@ -732,7 +805,8 @@ def fit_circuit(
method: str = "auto",
weight: str = "auto",
max_nfev: int = -1,
- num_procs: int = 0,
+ num_procs: int = -1,
+ timeout: int = 0,
) -> FitResult:
"""
Fit a circuit to a data set.
@@ -765,61 +839,65 @@ def fit_circuit(
A value less than 1 results in an attempt to figure out a suitable value based on, e.g., the number of cores detected.
Additionally, a negative value can be used to reduce the number of processes by that much (e.g., to leave one core for a GUI thread).
+ timeout: int, optional
+ The amount of time in seconds that a single fit is allowed to take before being timed out.
+ If this value is less than one, then no time limit is imposed.
+
Returns
-------
FitResult
"""
from lmfit.minimizer import MinimizerResult
- assert isinstance(circuit, Circuit), (
- type(circuit),
- circuit,
- )
- assert hasattr(data, "get_frequencies") and callable(data.get_frequencies)
- assert hasattr(data, "get_impedances") and callable(data.get_impedances)
- assert isinstance(method, str), (
- type(method),
- method,
- )
- if not (method in _METHODS or method == "auto"):
- raise FittingError(
+ if not isinstance(circuit, Circuit):
+ raise TypeError(f"Expected a Circuit instead of {circuit=}")
+ else:
+ validate_circuit(circuit)
+
+ if not isinstance(method, str):
+ raise TypeError(f"Expected a string instead of {method=}")
+ elif not (method in _METHODS or method == "auto"):
+ raise ValueError(
"Valid method values: '" + "', '".join(_METHODS) + "', and 'auto'"
)
- assert isinstance(weight, str), (
- type(weight),
- weight,
- )
- if not (weight in _WEIGHT_FUNCTIONS or weight == "auto"):
- raise FittingError(
+
+ if not isinstance(weight, str):
+ raise TypeError(f"Expected a string instead of {weight=}")
+ elif not (weight in _WEIGHT_FUNCTIONS or weight == "auto"):
+ raise ValueError(
"Valid weight values: '" + "', '".join(_WEIGHT_FUNCTIONS) + "', and 'auto'"
)
- assert issubdtype(type(max_nfev), integer), (
- type(max_nfev),
- max_nfev,
- )
- assert issubdtype(type(num_procs), integer), (
- type(num_procs),
- num_procs,
- )
- validate_circuit(circuit)
- if num_procs < 1:
- num_procs = _get_default_num_procs() - abs(num_procs)
- if num_procs < 1:
- num_procs = 1
+
+ if not _is_integer(max_nfev):
+ raise TypeError(f"Expected an integer instead of {max_nfev=}")
+
+ if not _is_integer(num_procs):
+ raise TypeError(f"Expected an integer instead of {num_procs=}")
+ elif num_procs < 1:
+ num_procs = max((_get_default_num_procs() - abs(num_procs), 1))
+
+ if not _is_integer(timeout):
+ raise TypeError(f"Expected an integer instead of {timeout=}")
+ elif timeout < 0:
+ raise ValueError(
+ f"Expected an integer equal to or greater than zero instead of {timeout=}"
+ )
+
num_steps: int = (len(_METHODS) if method == "auto" else 1) * (
len(_WEIGHT_FUNCTIONS) if weight == "auto" else 1
)
+
prog: Progress
with Progress("Preparing to fit", total=num_steps + 1) as prog:
- f: Frequencies = data.get_frequencies()
- Z_exp: ComplexImpedances = data.get_impedances()
fits: List[
Tuple[Circuit, float, Optional["MinimizerResult"], str, str, str]
] = []
+
methods: List[str] = [method] if method != "auto" else _METHODS
weights: List[str] = (
[weight] if weight != "auto" else list(_WEIGHT_FUNCTIONS.keys())
)
+
method_weight_combos: List[Tuple[str, str]] = []
for method in methods:
for weight in weights:
@@ -829,6 +907,14 @@ def fit_circuit(
weight,
)
)
+
+ f: Frequencies = data.get_frequencies()
+ if len(f) < 2:
+ raise ValueError(
+ f"There are fewer than two unmasked data points in the '{data.get_label()}' data set parsed from '{data.get_path()}'"
+ )
+
+ Z_exp: ComplexImpedances = data.get_impedances()
args = (
(
deepcopy(circuit),
@@ -841,26 +927,54 @@ def fit_circuit(
)
for (method, weight) in method_weight_combos
)
- prog.set_message("Performing fit(s)")
- if len(method_weight_combos) > 1 and num_procs > 1:
+
+ prog.set_message(
+ "Performing fit" + ("s" if len(method_weight_combos) > 1 else "")
+ )
+ res: Tuple[Circuit, float, Optional["MinimizerResult"], str, str, str]
+ if num_procs > 1 or timeout > 0:
with Pool(num_procs) as pool:
- for res in pool.imap_unordered(_fit_process, args):
+ iterator = pool.imap(_fit_process, args, 1)
+ while True:
+ try:
+ if timeout > 0:
+ res = iterator.next(timeout=timeout)
+ else:
+ res = iterator.next()
+ except MPTimeoutError:
+ if len(method_weight_combos) > 1:
+ raise FittingError(
+ "Timed out before finishing fitting using all combinations of methods and weights! Consider either reducing the number of combinations to test or increasing the time limit."
+ )
+ else:
+ raise FittingError(
+ "Timed out before finishing fitting! Consider increasing the time limit."
+ )
+ except StopIteration:
+ break
+
fits.append(res)
prog.increment()
+
else:
for res in map(_fit_process, args):
fits.append(res)
prog.increment()
- fits.sort(key=lambda _: log(_[1]) if _[2] is not None else inf)
+
if not fits:
raise FittingError("No valid results generated!")
+
+ fits.sort(key=lambda _: log(_[1]) if _[2] is not None else inf)
+
fit: Optional[MinimizerResult]
error_msg: str
Xps: float
circuit, Xps, fit, method, weight, error_msg = fits[0]
if fit is None:
raise FittingError(error_msg)
+
Z_fit: ComplexImpedances = circuit.get_impedances(f)
+
return FitResult(
circuit=circuit,
parameters=_extract_parameters(circuit, fit),
diff --git a/src/pyimpspec/analysis/kramers_kronig.py b/src/pyimpspec/analysis/kramers_kronig.py
deleted file mode 100644
index db5f013..0000000
--- a/src/pyimpspec/analysis/kramers_kronig.py
+++ /dev/null
@@ -1,1464 +0,0 @@
-# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-#
-# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
-# the LICENSES folder.
-
-from dataclasses import dataclass
-from multiprocessing import (
- Pool,
- Value,
-)
-from typing import (
- Callable,
- Dict,
- Generator,
- List,
- Optional,
- Tuple,
- Union,
-)
-from numpy import (
- abs,
- angle,
- array,
- float64,
- floating,
- inf,
- integer,
- isinf,
- issubdtype,
- log10 as log,
- min,
- max,
- nan,
- ndarray,
- pi,
- sum as array_sum,
- zeros,
-)
-from numpy.linalg import (
- inv,
- pinv,
-)
-from numpy.typing import NDArray
-from pyimpspec.exceptions import KramersKronigError
-from pyimpspec.analysis.fitting import (
- _METHODS,
- _from_lmfit,
- _to_lmfit,
-)
-from pyimpspec.analysis.utility import (
- _boukamp_weight,
- _calculate_pseudo_chisqr,
- _calculate_residuals,
- _interpolate,
- _get_default_num_procs,
-)
-from pyimpspec.circuit import parse_cdc
-from pyimpspec.circuit.base import Element
-from pyimpspec.circuit.circuit import Circuit
-from pyimpspec.circuit.connections import Series
-from pyimpspec.circuit.elements import (
- Capacitor,
- Resistor,
- Inductor,
-)
-from pyimpspec.data import DataSet
-from pyimpspec.progress import Progress
-from pyimpspec.typing import (
- ComplexImpedances,
- ComplexResiduals,
- Frequencies,
- Impedances,
- Phases,
- Residuals,
-)
-
-
-@dataclass(frozen=True)
-class TestResult:
- """
- An object representing the results of a linear Kramers-Kronig test applied to a data set.
-
- Parameters
- ----------
- circuit: Circuit
- The fitted circuit.
-
- num_RC: int
- The final number of RC elements in the fitted model (Boukamp, 1995).
-
- mu: float
- The |mu| value of the final fit (eq. 21 in Schönleber et al., 2014).
-
- pseudo_chisqr: float
- The pseudo chi-squared value (|pseudo chi-squared|, eq. 14 in Boukamp, 1995).
-
- frequencies: |Frequencies|
- The frequencies used to perform the test.
-
- impedances: |ComplexImpedances|
- The impedances produced by the fitted circuit at each of the tested frequencies.
-
- residuals: |ComplexResiduals|
- The residuals for the real (eq. 15 in Schönleber et al., 2014) and imaginary (eq. 16 in Schönleber et al., 2014) parts of the fit.
- """
-
- circuit: Circuit
- num_RC: int
- mu: float
- pseudo_chisqr: float
- frequencies: Frequencies
- impedances: ComplexImpedances
- residuals: ComplexResiduals
-
- def __repr__(self) -> str:
- return f"TestResult (num_RC={self.num_RC}, {hex(id(self))})"
-
- def get_label(self) -> str:
- """
- Get the label of this result
-
- Returns
- -------
- str
- """
- label: str = f"#(RC)={self.num_RC}"
- cdc: str = self.circuit.to_string()
- if "C" in cdc:
- label += ", C"
- if "L" in cdc:
- label += "+L"
- elif "L" in cdc:
- label += ", L"
- return label
-
- def get_frequencies(self, num_per_decade: int = -1) -> Frequencies:
- """
- Get the frequencies in the tested frequency range.
-
- Parameters
- ----------
- num_per_decade: int, optional
- The number of points per decade.
- A positive value results in frequencies being calculated within the original frequency range.
- Otherwise, only the original frequencies are used.
-
- Returns
- -------
- |Frequencies|
- """
- assert issubdtype(type(num_per_decade), integer), num_per_decade
- if num_per_decade > 0:
- return _interpolate(self.frequencies, num_per_decade)
- return self.frequencies
-
- def get_impedances(self, num_per_decade: int = -1) -> ComplexImpedances:
- """
- Get the fitted circuit's impedance response within the tested frequency range.
-
- Parameters
- ----------
- num_per_decade: int, optional
- The number of points per decade.
- A positive value results in data points being calculated using the fitted circuit within the original frequency range.
- Otherwise, only the original frequencies are used.
-
- Returns
- -------
- |ComplexImpedances|
- """
- assert issubdtype(type(num_per_decade), integer), num_per_decade
- if num_per_decade > 0:
- return self.circuit.get_impedances(self.get_frequencies(num_per_decade))
- return self.impedances
-
- def get_nyquist_data(
- self,
- num_per_decade: int = -1,
- ) -> Tuple[Impedances, Impedances]:
- """
- Get the data necessary to plot this TestResult as a Nyquist plot: the real and the negative imaginary parts of the impedances.
-
- Parameters
- ----------
- num_per_decade: int, optional
- The number of points per decade.
- A positive value results in data points being calculated using the fitted circuit within the original frequency range.
- Otherwise, only the original frequencies are used.
-
- Returns
- -------
- Tuple[|Impedances|, |Impedances|]
- """
- assert issubdtype(type(num_per_decade), integer), num_per_decade
- if num_per_decade > 0:
- Z: ComplexImpedances = self.get_impedances(num_per_decade)
- return (
- Z.real,
- -Z.imag,
- )
- return (
- self.impedances.real,
- -self.impedances.imag,
- )
-
- def get_bode_data(
- self,
- num_per_decade: int = -1,
- ) -> Tuple[Frequencies, Impedances, Phases]:
- """
- Get the data necessary to plot this TestResult as a Bode plot: the frequencies, the absolute magnitudes of the impedances, and the negative phase angles/shifts of the impedances in degrees.
-
- Parameters
- ----------
- num_per_decade: int, optional
- The number of points per decade.
- A positive value results in data points being calculated using the fitted circuit within the original frequency range.
- Otherwise, only the original frequencies are used.
-
- Returns
- -------
- Tuple[|Frequencies|, |Impedances|, |Phases|]
- """
- assert issubdtype(type(num_per_decade), integer), num_per_decade
- if num_per_decade > 0:
- f: Frequencies = self.get_frequencies(num_per_decade)
- Z: ComplexImpedances = self.circuit.get_impedances(f)
- return (
- f,
- abs(Z),
- -angle(Z, deg=True),
- )
- return (
- self.frequencies,
- abs(self.impedances),
- -angle(self.impedances, deg=True),
- )
-
- def get_residuals_data(self) -> Tuple[Frequencies, Residuals, Residuals]:
- """
- Get the data necessary to plot the relative residuals for this result: the frequencies, the relative residuals for the real parts of the impedances in percents, and the relative residuals for the imaginary parts of the impedances in percents.
-
- Returns
- -------
- Tuple[|Frequencies|, |Residuals|, |Residuals|]
- """
- return (
- self.frequencies, # type: ignore
- self.residuals.real * 100, # type: ignore
- self.residuals.imag * 100, # type: ignore
- )
-
- def calculate_score(self, mu_criterion: float) -> float:
- """
- Calculate a score based on the provided |mu|-criterion and the statistics of the test result.
- This calculation is part of the modified implementation of the algorithm described by Schönleber et al. (2014).
- A test result with |mu| greater than or equal to the |mu|-criterion will get a score of -numpy.inf.
-
- Parameters
- ----------
- mu_criterion: float
- The |mu|-criterion to apply (see |perform_test| for details).
-
- Returns
- -------
- float
- """
- return (
- -inf
- if self.mu >= mu_criterion
- else -log(self.pseudo_chisqr) / (abs(mu_criterion - self.mu) ** 0.75)
- )
-
- def to_statistics_dataframe(self) -> "DataFrame": # noqa: F821
- """
- Get the statistics related to the test as a |DataFrame| object.
-
- Returns
- -------
- |DataFrame|
- """
- from pandas import DataFrame
-
- statistics: Dict[str, Union[int, float, str]] = {
- "Log pseudo chi-squared": log(self.pseudo_chisqr),
- "Mu": self.mu,
- "Number of parallel RC elements": self.num_RC,
- "Series resistance (ohm)": self.get_series_resistance(),
- "Series capacitance (F)": self.get_series_capacitance(),
- "Series inductance (H)": self.get_series_inductance(),
- }
- return DataFrame.from_dict(
- {
- "Label": list(statistics.keys()),
- "Value": list(statistics.values()),
- }
- )
-
- def get_series_resistance(self) -> float:
- """
- Get the value of the series resistance.
-
- Returns
- -------
- float
- """
- series: Series = self.circuit.get_elements(flattened=False)[0]
- assert isinstance(series, Series)
- for elem_con in series.get_elements(flattened=False):
- if isinstance(elem_con, Resistor):
- return elem_con.get_value("R")
- return nan
-
- def get_series_capacitance(self) -> float:
- """
- Get the value of the series capacitance (or numpy.nan if not included in the circuit).
-
- Returns
- -------
- float
- """
- series: Series = self.circuit.get_elements(flattened=False)[0]
- assert isinstance(series, Series)
- for elem_con in series.get_elements(flattened=False):
- if isinstance(elem_con, Capacitor):
- return elem_con.get_value("C")
- return nan
-
- def get_series_inductance(self) -> float:
- """
- Get the value of the series inductance (or numpy.nan if not included in the circuit).
-
- Returns
- -------
- float
- """
- series: Series = self.circuit.get_elements(flattened=False)[0]
- assert isinstance(series, Series)
- for elem_con in series.get_elements(flattened=False):
- if isinstance(elem_con, Inductor):
- return elem_con.get_value("L")
- return nan
-
-
-def _calculate_tau(i: int, num_RC: int, tau_min: float64, tau_max: float64) -> float:
- # Calculate time constants according to eq. 12 in Schönleber et al. (2014)
- assert issubdtype(type(i), integer), i
- assert issubdtype(type(num_RC), integer), num_RC
- assert issubdtype(type(tau_min), floating), tau_min
- assert issubdtype(type(tau_max), floating), tau_max
- return pow(10, (log(tau_min) + (i - 1) / (num_RC - 1) * log(tau_max / tau_min)))
-
-
-def _generate_time_constants(w: NDArray[float64], num_RC: int) -> NDArray[float64]:
- assert isinstance(w, ndarray), w
- assert issubdtype(type(num_RC), integer), num_RC
- taus: NDArray[float64] = zeros(shape=(num_RC,), dtype=float64)
- tau_min: float64 = 1 / max(w)
- tau_max: float64 = 1 / min(w)
- taus[0] = tau_min
- taus[-1] = tau_max
- i: int
- if num_RC > 1:
- for i in range(2, num_RC):
- taus[i - 1] = _calculate_tau(i, num_RC, tau_min, tau_max)
- return taus
-
-
-def _calculate_mu(params: "Parameters") -> float: # noqa: F821
- from lmfit import Parameters
-
- assert isinstance(params, Parameters), params
- # Calculates the mu-value based on the fitted parameters according to eq. 21 in
- # Schönleber et al. (2014)
- R_neg: List[float64] = []
- R_pos: List[float64] = []
- name: str
- value: float64
- for name, value in params.valuesdict().items():
- if not name.startswith("R"):
- continue
- if value < 0:
- R_neg.append(abs(value))
- else:
- R_pos.append(value)
- neg_sum: float64 = sum(R_neg)
- pos_sum: float64 = sum(R_pos)
- if pos_sum == 0:
- return 0.0
- mu: float64 = 1.0 - neg_sum / pos_sum
- if mu < 0.0:
- return 0.0
- elif mu > 1.0:
- return 1.0
- return float(mu)
-
-
-def _elements_to_parameters(
- elements: NDArray[float64],
- taus: NDArray[float64],
- add_capacitance: bool,
- circuit: Circuit,
-) -> "Parameters": # noqa: F821
- from lmfit import Parameters
-
- assert isinstance(elements, ndarray), elements
- assert isinstance(taus, ndarray), taus
- assert isinstance(add_capacitance, bool), add_capacitance
- assert isinstance(circuit, Circuit), circuit
- parameters: Parameters = Parameters()
- R0: float
- R0, elements = elements[0], elements[1:]
- parameters.add(f"R_{0}", R0, vary=False)
- L: float
- L, elements = elements[-1], elements[:-1]
- C: float
- if add_capacitance:
- C, elements = elements[-1], elements[:-1]
- if C == 0.0:
- # Impedance due to the series capacitance is negligible.
- C = 1e50
- else:
- C = 1 / C
- for i, (R, t) in enumerate(zip(elements, taus), start=1):
- parameters.add(f"R_{i}", R, vary=False)
- parameters.add(f"tau_{i}", t, vary=False)
- if add_capacitance:
- i += 1
- parameters.add(f"C_{i}", C)
- i += 1
- parameters.add(f"L_{i}", L, vary=False)
- return parameters
-
-
-def _complex_residual(
- params: "Parameters", # noqa: F821
- circuit: Circuit,
- f: Frequencies,
- Z_exp: ComplexImpedances,
- weight: NDArray[float64],
- identifiers: Dict[int, Element],
-) -> NDArray[float64]:
- from lmfit import Parameters
-
- assert isinstance(params, Parameters), params
- assert isinstance(circuit, Circuit), circuit
- assert isinstance(f, ndarray), f
- assert isinstance(Z_exp, ndarray), Z_exp
- assert isinstance(weight, ndarray), weight
- assert isinstance(identifiers, dict), identifiers
- _from_lmfit(params, identifiers)
- Z_fit: ComplexImpedances = circuit.get_impedances(f)
- return array(
- [
- (weight * (Z_exp.real - Z_fit.real)) ** 2,
- (weight * (Z_exp.imag - Z_fit.imag)) ** 2,
- ]
- )
-
-
-def _cnls_test(args: tuple) -> Tuple[int, float, Optional[Circuit], float]:
- from lmfit import minimize
- from lmfit.minimizer import MinimizerResult
-
- f: Frequencies
- Z_exp: ComplexImpedances
- weight: NDArray[float64]
- num_RC: int
- add_capacitance: bool
- add_inductance: bool
- method: str
- max_nfev: int
- (
- f,
- Z_exp,
- weight,
- num_RC,
- add_capacitance,
- add_inductance,
- method,
- max_nfev,
- ) = args
- assert isinstance(f, ndarray), f
- assert isinstance(Z_exp, ndarray), Z_exp
- assert isinstance(weight, ndarray), weight
- assert issubdtype(type(num_RC), integer), num_RC
- assert isinstance(add_capacitance, bool), add_capacitance
- assert isinstance(add_inductance, bool), add_inductance
- assert isinstance(method, str), method
- assert issubdtype(type(max_nfev), integer), max_nfev
- w: NDArray[float64] = 2 * pi * f
- taus: NDArray[float64] = _generate_time_constants(w, num_RC)
- circuit: Circuit = _generate_circuit(taus, add_capacitance, add_inductance)
- identifiers: Dict[int, Element] = {
- v: k for k, v in circuit.generate_element_identifiers(running=True).items()
- }
- fit: MinimizerResult
- fit = minimize(
- _complex_residual,
- _to_lmfit(identifiers),
- method,
- args=(
- circuit,
- f,
- Z_exp,
- weight,
- identifiers,
- ),
- max_nfev=None if max_nfev < 1 else max_nfev,
- )
- _from_lmfit(fit.params, identifiers)
- mu: float = _calculate_mu(fit.params)
- Z_fit: ComplexImpedances = circuit.get_impedances(f)
- Xps: float = _calculate_pseudo_chisqr(Z_exp, Z_fit, weight)
- return (
- num_RC,
- mu,
- circuit,
- Xps,
- )
-
-
-# A shared integer value, which represents the smallest number of RC elements that has resulted
-# in a mu-value that is less than the chosen mu-criterion. If this value is negative, then an
-# appropriate number of RC elements has not yet been found. However, if the value is greater than
-# the number of RC elements currently being evaluated by the process accessing the value, then
-# the process should continue performing the current fitting. Otherwise, any ongoing fitting should
-# be terminated as soon as possible and no further attempts should be made with a greater number of
-# RC elements.
-pool_optimal_num_RC = None # multiprocessing.Value
-
-
-# Initializer for the pool of processes that are used when performing the complex variant of the
-# linear Kramers-Kronig test.
-def _pool_init(args):
- global pool_optimal_num_RC
- pool_optimal_num_RC = args
-
-
-def _cnls_mu_process(args: tuple) -> Tuple[int, float, Optional[Circuit], float]:
- from lmfit import minimize
- from lmfit.minimizer import MinimizerResult
-
- global pool_optimal_num_RC
- f: Frequencies
- Z_exp: ComplexImpedances
- weight: NDArray[float64]
- mu_criterion: float
- num_RC: int
- add_capacitance: bool
- add_inductance: bool
- method: str
- max_nfev: int
- (
- f,
- Z_exp,
- weight,
- mu_criterion,
- num_RC,
- add_capacitance,
- add_inductance,
- method,
- max_nfev,
- ) = args
- assert isinstance(f, ndarray), f
- assert isinstance(Z_exp, ndarray), Z_exp
- assert isinstance(weight, ndarray), weight
- assert issubdtype(type(mu_criterion), floating), mu_criterion
- assert issubdtype(type(num_RC), integer), num_RC
- assert isinstance(add_capacitance, bool), add_capacitance
- assert isinstance(add_inductance, bool), add_inductance
- assert isinstance(method, str), method
- assert issubdtype(type(max_nfev), integer), max_nfev
-
- def exit_early(
- params: "Parameters", # noqa: F821
- i: int,
- _,
- *args,
- **kwargs,
- ):
- if 0 <= pool_optimal_num_RC.value < num_RC: # type: ignore
- return True
- return None
-
- w: NDArray[float64] = 2 * pi * f
- taus: NDArray[float64] = _generate_time_constants(w, num_RC)
- circuit: Circuit = _generate_circuit(taus, add_capacitance, add_inductance)
- if 0 <= pool_optimal_num_RC.value < num_RC: # type: ignore
- return (num_RC, -1.0, None, -1.0)
- identifiers: Dict[int, Element] = {
- v: k for k, v in circuit.generate_element_identifiers(running=True).items()
- }
- fit: MinimizerResult
- fit = minimize(
- _complex_residual,
- _to_lmfit(identifiers),
- method,
- args=(
- circuit,
- f,
- Z_exp,
- weight,
- identifiers,
- ),
- max_nfev=None if max_nfev < 1 else max_nfev,
- iter_cb=exit_early,
- )
- if 0 <= pool_optimal_num_RC.value < num_RC: # type: ignore
- return (num_RC, -1.0, None, -1.0)
- _from_lmfit(fit.params, identifiers)
- mu: float = _calculate_mu(fit.params)
- Z_fit: ComplexImpedances = circuit.get_impedances(f)
- Xps: float = _calculate_pseudo_chisqr(Z_exp, Z_fit, weight)
- if mu < mu_criterion:
- with pool_optimal_num_RC.get_lock(): # type: ignore
- pool_optimal_num_RC.value = num_RC # type: ignore
- return (
- num_RC,
- mu,
- circuit,
- Xps,
- )
-
-
-def _generate_variable_matrices(
- w: NDArray[float64],
- num_RC: int,
- taus: NDArray[float64],
- add_capacitance: bool,
- abs_Z_exp: Impedances,
-) -> Tuple[NDArray[float64], NDArray[float64]]:
- assert isinstance(w, ndarray), w
- assert issubdtype(type(num_RC), integer), num_RC
- assert isinstance(taus, ndarray), taus
- assert isinstance(add_capacitance, bool), add_capacitance
- assert isinstance(abs_Z_exp, ndarray), abs_Z_exp
- # Generate matrices with the following columns (top to bottom is left to right)
- # - R0, series resistance
- # - Ri, resistance in parallel with taus[i - 1] where 0 < i <= num_RC
- # - (C, optional series capacitance)
- # - L, series inductance
- if add_capacitance is True:
- a_re = zeros((w.size, num_RC + 3), dtype=float64)
- a_im = zeros((w.size, num_RC + 3), dtype=float64)
- # Series capacitance
- a_im[:, -2] = -1 / (w * abs_Z_exp) # No real part
- else:
- a_re = zeros((w.size, num_RC + 2), dtype=float64)
- a_im = zeros((w.size, num_RC + 2), dtype=float64)
- # Series resistance
- a_re[:, 0] = 1 / abs_Z_exp # No imaginary part
- # Series inductance
- a_im[:, -1] = w / abs_Z_exp # No real part
- # RC elements
- for i, tau in enumerate(taus):
- a_re[:, i + 1] = (1 / (1 + 1j * w * tau)).real / abs_Z_exp
- a_im[:, i + 1] = (1 / (1 + 1j * w * tau)).imag / abs_Z_exp
- return (
- a_re,
- a_im,
- )
-
-
-def _generate_circuit(
- taus: NDArray[float64],
- add_capacitance: bool,
- add_inductance: bool,
-) -> Circuit:
- assert isinstance(taus, ndarray), taus
- assert isinstance(add_capacitance, bool), add_capacitance
- assert isinstance(add_inductance, bool), add_inductance
- cdc: List[str] = ["R{R=1}"]
- t: float
- for t in taus:
- cdc.append(f"K{{R=1,tau={t}F}}")
- if add_capacitance is True:
- cdc.append("C{C=1e-6}")
- if add_inductance is True:
- cdc.append("L{L=1e-3}")
- circuit: Circuit = parse_cdc("".join(cdc))
- for element in circuit.get_elements():
- assert isinstance(element, Element)
- keys: List[str] = list(element.get_values().keys())
- element.set_lower_limits(**{_: -inf for _ in keys})
- element.set_upper_limits(**{_: inf for _ in keys})
- return circuit
-
-
-def _real_test(
- a_re: NDArray[float64],
- a_im: NDArray[float64],
- Z_exp: ComplexImpedances,
- abs_Z_exp: Impedances,
- w: NDArray[float64],
- f: Frequencies,
- taus: NDArray[float64],
- add_capacitance: bool,
- circuit: Circuit,
- identifiers: Dict[int, Element],
-):
- assert isinstance(a_re, ndarray), a_re
- assert isinstance(a_im, ndarray), a_im
- assert isinstance(Z_exp, ndarray), Z_exp
- assert isinstance(abs_Z_exp, ndarray), abs_Z_exp
- assert isinstance(w, ndarray), w
- assert isinstance(f, ndarray), f
- assert isinstance(taus, ndarray), taus
- assert isinstance(add_capacitance, bool), add_capacitance
- assert isinstance(circuit, Circuit), circuit
- assert isinstance(identifiers, dict), identifiers
- # Fit using the real part
- elements: NDArray[float64] = pinv(a_re).dot(Z_exp.real / abs_Z_exp)
- # Fit using the imaginary part to fix the series inductance (and capacitance)
- a_im = zeros((w.size, 2))
- a_im[:, -1] = w / abs_Z_exp
- if add_capacitance:
- a_im[:, -2] = -1 / (w * abs_Z_exp)
- elements[-2] = 1e-18 # Nullifies the series capacitance without dividing by 0
- _from_lmfit(
- _elements_to_parameters(
- elements,
- taus,
- add_capacitance,
- circuit,
- ),
- identifiers,
- )
- Z_fit: ComplexImpedances = circuit.get_impedances(f)
- coefs: NDArray[float64] = pinv(a_im).dot((Z_exp.imag - Z_fit.imag) / abs_Z_exp)
- # Extract the corrected series inductance (and capacitance)
- if add_capacitance:
- elements[-2:] = coefs
- else:
- elements[-1] = coefs[-1]
- _from_lmfit(
- _elements_to_parameters(
- elements,
- taus,
- add_capacitance,
- circuit,
- ),
- identifiers,
- )
-
-
-def _imaginary_test(
- a_im: NDArray[float64],
- Z_exp: ComplexImpedances,
- abs_Z_exp: Impedances,
- f: Frequencies,
- taus: NDArray[float64],
- add_capacitance: bool,
- weight: NDArray[float64],
- circuit: Circuit,
- identifiers: Dict[int, Element],
-):
- assert isinstance(a_im, ndarray), a_im
- assert isinstance(Z_exp, ndarray), Z_exp
- assert isinstance(abs_Z_exp, ndarray), abs_Z_exp
- assert isinstance(f, ndarray), f
- assert isinstance(taus, ndarray), taus
- assert isinstance(add_capacitance, bool), add_capacitance
- assert isinstance(weight, ndarray), weight
- assert isinstance(circuit, Circuit), circuit
- assert isinstance(identifiers, dict), identifiers
- # Fit using the imaginary part
- elements: NDArray[float64] = pinv(a_im).dot(Z_exp.imag / abs_Z_exp)
- # Estimate the series resistance
- _from_lmfit(
- _elements_to_parameters(
- elements,
- taus,
- add_capacitance,
- circuit,
- ),
- identifiers,
- )
- Z_fit: ComplexImpedances = circuit.get_impedances(f)
- elements[0] = array_sum(weight * (Z_exp.real - Z_fit.real)) / array_sum(weight)
- _from_lmfit(
- _elements_to_parameters(
- elements,
- taus,
- add_capacitance,
- circuit,
- ),
- identifiers,
- )
-
-
-def _complex_test(
- a_re: NDArray[float64],
- a_im: NDArray[float64],
- Z_exp: ComplexImpedances,
- abs_Z_exp: Impedances,
- taus: NDArray[float64],
- add_capacitance: bool,
- circuit: Circuit,
- identifiers: Dict[int, Element],
-):
- assert isinstance(a_re, ndarray), a_re
- assert isinstance(a_im, ndarray), a_im
- assert isinstance(Z_exp, ndarray), Z_exp
- assert isinstance(abs_Z_exp, ndarray), abs_Z_exp
- assert isinstance(taus, ndarray), taus
- assert isinstance(add_capacitance, bool), add_capacitance
- assert isinstance(circuit, Circuit), circuit
- assert isinstance(identifiers, dict), identifiers
- # Fit using the complex impedance
- x: NDArray[float64] = inv(a_re.T.dot(a_re) + a_im.T.dot(a_im))
- y: NDArray[float64] = a_re.T.dot(Z_exp.real / abs_Z_exp) + a_im.T.dot(
- Z_exp.imag / abs_Z_exp
- )
- elements: NDArray[float64] = x.dot(y)
- _from_lmfit(
- _elements_to_parameters(
- elements,
- taus,
- add_capacitance,
- circuit,
- ),
- identifiers,
- )
-
-
-def _test_wrapper(args: tuple) -> Tuple[int, float, Optional[Circuit], float]:
- test: str
- f: Frequencies
- Z_exp: ComplexImpedances
- weight: NDArray[float64]
- num_RC: int
- add_capacitance: bool
- test, f, Z_exp, weight, num_RC, add_capacitance = args
- assert isinstance(test, str), test
- assert isinstance(f, ndarray), f
- assert isinstance(Z_exp, ndarray), Z_exp
- assert isinstance(weight, ndarray), weight
- assert issubdtype(type(num_RC), integer), num_RC
- assert isinstance(add_capacitance, bool), add_capacitance
- abs_Z_exp: Impedances = abs(Z_exp)
- w: NDArray[float64] = 2 * pi * f
- taus: NDArray[float64] = _generate_time_constants(w, num_RC)
- a_re: NDArray[float64]
- a_im: NDArray[float64]
- a_re, a_im = _generate_variable_matrices(
- w, num_RC, taus, add_capacitance, abs_Z_exp
- )
- circuit: Circuit = _generate_circuit(taus, add_capacitance, True)
- identifiers: Dict[int, Element] = {
- v: k for k, v in circuit.generate_element_identifiers(running=True).items()
- }
- # Solve the set of linear equations and update the circuit's parameters
- if test == "real":
- _real_test(
- a_re,
- a_im,
- Z_exp,
- abs_Z_exp,
- w,
- f,
- taus,
- add_capacitance,
- circuit,
- identifiers,
- )
- elif test == "imaginary":
- _imaginary_test(
- a_im,
- Z_exp,
- abs_Z_exp,
- f,
- taus,
- add_capacitance,
- weight,
- circuit,
- identifiers,
- )
- elif test == "complex":
- _complex_test(
- a_re,
- a_im,
- Z_exp,
- abs_Z_exp,
- taus,
- add_capacitance,
- circuit,
- identifiers,
- )
- # Calculate return values
- mu: float = _calculate_mu(_to_lmfit(identifiers))
- Z_fit: ComplexImpedances = circuit.get_impedances(f)
- Xps: float = _calculate_pseudo_chisqr(Z_exp, Z_fit, weight)
- return (
- num_RC,
- mu,
- circuit,
- Xps,
- )
-
-
-def _perform_single_cnls_test(
- f: Frequencies,
- Z_exp: ComplexImpedances,
- weight: NDArray[float64],
- num_RC: int,
- add_capacitance: bool,
- add_inductance: bool,
- method: str,
- max_nfev: int,
- num_procs: int,
-) -> Tuple[int, float, Optional[Circuit], float]:
- with Progress("Performing test"):
- args = (
- (
- f,
- Z_exp,
- weight,
- num_RC,
- add_capacitance,
- add_inductance,
- method,
- max_nfev,
- )
- for _ in range(1)
- )
- fits: List[Tuple[int, float, Optional[Circuit], float]]
- if num_procs > 1:
- # To prevent the GUI thread of DearEIS from locking up
- with Pool(1) as pool:
- fits = pool.map(_cnls_test, args)
- else:
- fits = list(map(_cnls_test, args))
- return fits[0]
-
-
-def _perform_multiple_cnls_tests(
- f: Frequencies,
- Z_exp: ComplexImpedances,
- weight: NDArray[float64],
- num_RC: int,
- mu_criterion: float,
- add_capacitance: bool,
- add_inductance: bool,
- method: str,
- max_nfev: int,
- num_procs: int,
-) -> Tuple[int, float, Optional[Circuit], float]:
- prog: Progress
- with Progress("Preparing arguments") as prog:
- num_RC = abs(num_RC) or len(f)
- num_RCs: List[int] = list(range(1, num_RC + 1))
- args = (
- (
- f,
- Z_exp,
- weight,
- mu_criterion,
- num_RC,
- add_capacitance,
- add_inductance,
- method,
- max_nfev,
- )
- for num_RC in num_RCs
- )
- fits: List[Tuple[int, float, Optional[Circuit], float]] = []
- prog.set_message("Performing test(s)", i=0, total=len(num_RCs) + 1)
- i: int
- res: Tuple[int, float, Optional[Circuit], float]
- if num_procs > 1:
- with Pool(
- num_procs,
- initializer=_pool_init,
- initargs=(Value("i", -1),),
- ) as pool:
- for i, res in enumerate(pool.imap_unordered(_cnls_mu_process, args)):
- prog.increment()
- if res[2] is not None:
- fits.append(res)
- else:
- _pool_init(Value("i", -1))
- for i, res in enumerate(map(_cnls_mu_process, args)):
- prog.increment()
- if res[2] is not None:
- fits.append(res)
- fits.sort(key=lambda _: _[0])
- mu: float
- circuit: Circuit
- Xps: float
- for i, (num_RC, mu, circuit, Xps) in enumerate(fits):
- if mu < mu_criterion:
- break
- return fits[i]
-
-
-def _perform_linear_tests(
- test: str,
- f: Frequencies,
- Z_exp: ComplexImpedances,
- weight: NDArray[float64],
- num_RC: int,
- mu_criterion: float,
- add_capacitance: bool,
- num_procs: int,
-) -> Tuple[int, float, Optional[Circuit], float]:
- prog: Progress
- with Progress("Preparing arguments") as prog:
- supported_tests: List[str] = [
- "complex",
- "real",
- "imaginary",
- ]
- assert test in supported_tests, f"Unsupported test: '{test}'!"
- num_RCs: List[int]
- if num_RC > 0:
- num_RCs = [num_RC]
- else:
- num_RC = abs(num_RC)
- if num_RC <= 1:
- num_RC = len(f)
- num_RCs = list(range(1, num_RC + 1))
- args = (
- (
- test,
- f,
- Z_exp,
- weight,
- num_RC,
- add_capacitance,
- )
- for num_RC in num_RCs
- )
- fits: List[Tuple[int, float, Optional[Circuit], float]] = []
- prog.set_message("Performing test(s)", i=0, total=len(num_RCs) + 1)
- for i, res in enumerate(map(_test_wrapper, args)):
- prog.increment()
- fits.append(res)
- if len(fits) == 1:
- return fits[0]
- fits.sort(key=lambda _: _[0])
- for i, (num_RC, mu, circuit, Xps) in enumerate(fits):
- if mu < mu_criterion:
- break
- return fits[i]
-
-
-def perform_test(
- data: DataSet,
- test: str = "complex",
- num_RC: int = 0,
- mu_criterion: float = 0.85,
- add_capacitance: bool = False,
- add_inductance: bool = False,
- method: str = "leastsq",
- max_nfev: int = -1,
- num_procs: int = 0,
-) -> TestResult:
- """
- Performs a linear Kramers-Kronig test as described by Boukamp (1995).
- The results can be used to check the validity of an impedance spectrum before performing equivalent circuit fitting.
- If the number of RC elements is less than two, then a suitable number of RC elements is determined using the procedure described by Schönleber et al. (2014) based on a criterion for the calculated |mu| (0.0 to 1.0).
- A |mu| of 1.0 represents underfitting and a |mu| of 0.0 represents overfitting.
-
- References:
-
- - Boukamp, B.A., 1995, J. Electrochem. Soc., 142, 1885-1894 (https://doi.org/10.1149/1.2044210)
- - Schönleber, M., Klotz, D., and Ivers-Tiffée, E., 2014, Electrochim. Acta, 131, 20-27 (https://doi.org/10.1016/j.electacta.2014.01.034)
-
- Parameters
- ----------
- data: DataSet
- The data set to be tested.
-
- test: str, optional
- Supported values include "complex", "imaginary", "real", and "cnls".
- The "complex", "imaginary", and "real" tests perform the complex, imaginary, and real tests, respectively, according to Boukamp (1995).
- The "cnls" test, which is slower than the other three tests, performs a complex non-linear least squares fit using `lmfit.minimize`_.
-
- num_RC: int, optional
- The number of RC elements to use.
- A value greater than or equal to one results in the specific number of RC elements being tested.
- A value less than one results in the use of the procedure described by Schönleber et al. (2014) based on the chosen |mu|-criterion.
- If the provided value is negative, then the maximum number of RC elements to test is equal to the absolute value of the provided value.
- If the provided value is zero, then the maximum number of RC elements to test is equal to the number of frequencies in the data set.
-
- mu_criterion: float, optional
- The chosen |mu|-criterion. See Schönleber et al. (2014) for more information.
-
- add_capacitance: bool, optional
- Add an additional capacitance in series with the rest of the circuit.
-
- add_inductance: bool, optional
- Add an additional inductance in series with the rest of the circuit.
- Applies only to the "cnls" test.
-
- method: str, optional
- The fitting method to use when performing a "cnls" test.
- See the list of methods that are listed in the documentation for the lmfit package.
- Methods that do not require providing bounds for all parameters or a function to calculate the Jacobian should work.
-
- max_nfev: int, optional
- The maximum number of function evaluations when fitting.
- A value less than one equals no limit.
- Applies only to the "cnls" test.
-
- num_procs: int, optional
- The maximum number of parallel processes to use when performing a test.
- A value less than 1 results in an attempt to figure out a suitable value based on, e.g., the number of cores detected.
- Additionally, a negative value can be used to reduce the number of processes by that much (e.g., to leave one core for a GUI thread).
- Applies only to the "cnls" test.
-
- Returns
- -------
- TestResult
- """
- assert hasattr(data, "get_frequencies") and callable(data.get_frequencies)
- assert hasattr(data, "get_impedances") and callable(data.get_impedances)
- assert isinstance(test, str), (
- type(test),
- test,
- )
- assert issubdtype(type(num_RC), integer), (
- type(num_RC),
- num_RC,
- )
- num_points: int = len(data.get_frequencies())
- if num_RC > num_points:
- raise KramersKronigError(
- "The value of num_RC must be less than or equal to the number of data points"
- )
- assert issubdtype(type(mu_criterion), floating), (
- type(mu_criterion),
- mu_criterion,
- )
- if num_RC <= 0 and not (0.0 <= mu_criterion <= 1.0):
- raise KramersKronigError(
- "The value of mu_criterion must be between 0.0 and 1.0 (inclusive)"
- )
- assert isinstance(add_capacitance, bool), (
- type(add_capacitance),
- add_capacitance,
- )
- assert isinstance(add_inductance, bool), (
- type(add_inductance),
- add_inductance,
- )
- assert isinstance(method, str), (
- type(method),
- method,
- )
- if method not in _METHODS:
- raise KramersKronigError("Valid method values: '" + "', '".join(_METHODS) + "'")
- assert issubdtype(type(max_nfev), integer), (
- type(max_nfev),
- max_nfev,
- )
- assert issubdtype(type(num_procs), integer), (
- type(num_procs),
- num_procs,
- )
- if num_procs < 1:
- num_procs = _get_default_num_procs() - abs(num_procs)
- if num_procs < 1:
- num_procs = 1
- f: Frequencies = data.get_frequencies()
- Z_exp: ComplexImpedances = data.get_impedances()
- weight: NDArray[float64] = _boukamp_weight(Z_exp)
- mu: float
- circuit: Optional[Circuit] = None
- Xps: float # pseudo chi-squared
- if test == "cnls":
- assert method in _METHODS, f"Unsupported method: '{method}'!"
- if num_RC > 0:
- # Perform the test with a specific number of RC elements
- num_RC, mu, circuit, Xps = _perform_single_cnls_test(
- f,
- Z_exp,
- weight,
- num_RC,
- add_capacitance,
- add_inductance,
- method,
- max_nfev,
- num_procs,
- )
- else:
- # Find an appropriate number of RC elements based on the calculated mu-value and the
- # provided threshold value. Use multiple parallel processes if possible.
- num_RC, mu, circuit, Xps = _perform_multiple_cnls_tests(
- f,
- Z_exp,
- weight,
- num_RC,
- mu_criterion,
- add_capacitance,
- add_inductance,
- method,
- max_nfev,
- num_procs,
- )
- else:
- num_RC, mu, circuit, Xps = _perform_linear_tests(
- test,
- f,
- Z_exp,
- weight,
- num_RC,
- mu_criterion,
- add_capacitance,
- num_procs,
- )
- # ========== Result ==========
- assert circuit is not None
- Z_fit: ComplexImpedances = circuit.get_impedances(f)
- return TestResult(
- circuit=circuit,
- num_RC=num_RC,
- mu=mu,
- pseudo_chisqr=Xps,
- frequencies=f,
- impedances=Z_fit,
- # Residuals calculated according to eqs. 15 and 16
- # in Schönleber et al. (2014)
- residuals=_calculate_residuals(Z_exp, Z_fit),
- )
-
-
-def _prepare_exploratory_function_and_arguments(
- test: str,
- f: Frequencies,
- Z_exp: ComplexImpedances,
- weight: NDArray[float64],
- num_RCs: List[int],
- add_capacitance: bool,
- add_inductance: bool,
- method: str,
- max_nfev: int,
-) -> Tuple[Callable, Generator]:
- if test == "cnls":
- return (
- _cnls_test,
- (
- (
- f,
- Z_exp,
- weight,
- num_RC,
- add_capacitance,
- add_inductance,
- method,
- max_nfev,
- )
- for num_RC in num_RCs
- ),
- )
- supported_tests: List[str] = [
- "complex",
- "real",
- "imaginary",
- ]
- assert test in supported_tests, f"Unsupported test: '{test}'!"
- return (
- _test_wrapper,
- (
- (
- test,
- f,
- Z_exp,
- weight,
- num_RC,
- add_capacitance,
- )
- for num_RC in num_RCs
- ),
- )
-
-
-def perform_exploratory_tests(
- data: DataSet,
- test: str = "complex",
- num_RCs: Optional[List[int]] = None,
- mu_criterion: float = 0.85,
- add_capacitance: bool = False,
- add_inductance: bool = False,
- method: str = "leastsq",
- max_nfev: int = -1,
- num_procs: int = 0,
-) -> List[TestResult]:
- """
- Performs a batch of linear Kramers-Kronig tests (Boukamp, 1995), which are then scored and sorted from best to worst before they are returned.
- Based on the algorithm described by Schönleber et al. (2014).
- However, the selection of the number of RC elements takes into account factors other than just the applied |mu|-criterion and the |mu| values of the test results.
- This custom scoring system in combination with the ability to plot the intermediate results (i.e., all test results and corresponding |mu| versus the number of RC elements) should help to avoid false negatives that could otherwise occur in some cases.
-
- References:
-
- - B.A. Boukamp, 1995, J. Electrochem. Soc., 142, 1885-1894 (https://doi.org/10.1149/1.2044210)
- - M. Schönleber, D. Klotz, and E. Ivers-Tiffée, 2014, Electrochim. Acta, 131, 20-27 (https://doi.org/10.1016/j.electacta.2014.01.034)
-
- Parameters
- ----------
- data: DataSet
- The data set to be tested.
-
- test: str, optional
- See |perform_test| for details.
-
- num_RCs: Optional[List[int]], optional
- A list of integers representing the various number of RC elements to test.
- An empty list results in all possible numbers of RC elements up to the total number of frequencies being tested.
-
- mu_criterion: float, optional
- See |perform_test| for details.
-
- add_capacitance: bool, optional
- See |perform_test| for details.
-
- add_inductance: bool, optional
- See |perform_test| for details.
-
- method: str, optional
- See |perform_test| for details.
-
- max_nfev: int, optional
- See |perform_test| for details.
-
- num_procs: int, optional
- See |perform_test| for details.
-
- Returns
- -------
- List[TestResult]
- """
- assert hasattr(data, "get_frequencies") and callable(data.get_frequencies)
- assert hasattr(data, "get_impedances") and callable(data.get_impedances)
- assert isinstance(test, str), (
- type(test),
- test,
- )
- if num_RCs is None:
- num_RCs = []
- assert isinstance(num_RCs, list), (
- type(num_RCs),
- num_RCs,
- )
- assert all(map(lambda _: issubdtype(type(_), integer), num_RCs))
- num_points: int = len(data.get_frequencies())
- if len(num_RCs) > 0 and max(num_RCs) > num_points:
- raise KramersKronigError(
- "The maximum value of num_RCs must be less than or equal to the number of data points"
- )
- assert issubdtype(type(mu_criterion), floating), (
- type(mu_criterion),
- mu_criterion,
- )
- if not (0.0 <= mu_criterion <= 1.0):
- raise KramersKronigError(
- "The value of mu_criterion must be between 0.0 and 1.0 (inclusive)"
- )
- assert isinstance(add_capacitance, bool), (
- type(add_capacitance),
- add_capacitance,
- )
- assert isinstance(add_inductance, bool), (
- type(add_inductance),
- add_inductance,
- )
- assert isinstance(method, str), (
- type(method),
- method,
- )
- if method not in _METHODS:
- raise KramersKronigError("Valid method values: '" + "', '".join(_METHODS) + "'")
- assert issubdtype(type(max_nfev), integer), (
- type(max_nfev),
- max_nfev,
- )
- assert issubdtype(type(num_procs), integer), (
- type(num_procs),
- num_procs,
- )
- results: List[TestResult] = []
- if num_procs < 1:
- num_procs = _get_default_num_procs() - abs(num_procs)
- if num_procs < 1:
- num_procs = 1
- f: Frequencies = data.get_frequencies()
- Z_exp: ComplexImpedances = data.get_impedances()
- if len(num_RCs) == 0:
- num_RCs = list(range(1, len(f)))
- num_steps: int = len(num_RCs)
- num_steps += 2 # Calculating weight and preparing arguments
- prog: Progress
- with Progress("Preparing arguments", total=num_steps + 1) as prog:
- weight: NDArray[float64] = _boukamp_weight(Z_exp)
- prog.increment()
- num_RC: int
- func: Callable
- func, args = _prepare_exploratory_function_and_arguments(
- test,
- f,
- Z_exp,
- weight,
- num_RCs,
- add_capacitance,
- add_inductance,
- method,
- max_nfev,
- )
- prog.increment()
- fits: List[Tuple[int, float, Optional[Circuit], float]] = []
- prog.set_message("Performing test(s)")
- if test == "cnls" and num_procs > 1:
- with Pool(num_procs) as pool:
- for res in pool.imap_unordered(func, args):
- fits.append(res)
- prog.increment()
- else:
- for res in map(func, args):
- fits.append(res)
- prog.increment()
- mu: float
- circuit: Optional[Circuit]
- Xps: float
- for (num_RC, mu, circuit, Xps) in fits:
- assert circuit is not None
- Z_fit: ComplexImpedances = circuit.get_impedances(f)
- results.append(
- TestResult(
- circuit=circuit,
- num_RC=num_RC,
- mu=mu,
- pseudo_chisqr=Xps,
- frequencies=f,
- impedances=Z_fit,
- # Residuals calculated according to eqs. 15 and 16
- # in Schönleber et al. (2014)
- residuals=_calculate_residuals(Z_exp, Z_fit),
- )
- )
- scores: List[float] = [_.calculate_score(mu_criterion) for _ in results]
- if all(map(isinf, scores)):
- results.sort(key=lambda _: abs(-6 - log(_.pseudo_chisqr)))
- else:
- results.sort(
- key=lambda _: _.calculate_score(mu_criterion),
- reverse=True,
- )
- return results
diff --git a/src/pyimpspec/analysis/kramers_kronig/__init__.py b/src/pyimpspec/analysis/kramers_kronig/__init__.py
new file mode 100644
index 0000000..69eaa85
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/__init__.py
@@ -0,0 +1,30 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from .result import KramersKronigResult
+from .single import perform_kramers_kronig_test
+from .exploratory import (
+ evaluate_log_F_ext,
+ perform_exploratory_kramers_kronig_tests,
+)
+from .algorithms import (
+ suggest_num_RC,
+ suggest_num_RC_limits,
+ suggest_representation,
+)
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/__init__.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/__init__.py
new file mode 100644
index 0000000..ad1b0bf
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/__init__.py
@@ -0,0 +1,743 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from numpy import (
+ array,
+ diff,
+ exp,
+ float64,
+ isclose,
+ log10 as log,
+ mean,
+)
+from numpy.typing import NDArray
+from pyimpspec.analysis.kramers_kronig.result import KramersKronigResult
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.typing import Frequencies
+from pyimpspec.typing.helpers import (
+ Callable,
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ _is_boolean,
+ _is_floating,
+ _is_integer,
+ _is_integer_list,
+)
+from .method_1 import suggest as suggest_num_RC_method_1
+from .method_2 import suggest as suggest_num_RC_method_2
+from .method_3 import suggest as suggest_num_RC_method_3
+from .method_4 import suggest as suggest_num_RC_method_4
+from .method_5 import suggest as suggest_num_RC_method_5
+from .method_6 import suggest as suggest_num_RC_method_6
+from .representation import suggest as suggest_representation
+from .utility.pseudo_chi_squared import _approximate_transition_and_end_point
+from .utility.common import subdivide_frequencies
+from .utility.osculating_circle import calculate_curvatures
+
+
+def suggest_num_RC_limits(
+ tests: List[KramersKronigResult],
+ lower_limit: int = 0,
+ upper_limit: int = 0,
+ limit_delta: int = 0,
+ threshold: float = 4.0,
+) -> Tuple[int, int]:
+ """
+ Suggest lower and upper limits for the range of number of time constants where the optimal number of time constants should be looked for.
+ The lower limit is the point at which incrementing the number of time constants no longer significantly improves the fit.
+ The upper limit is suggested based on the mean distances between sign changes of the curvatures and when those drop below a threshold.
+
+ References:
+
+ - V. Yrjänä and J. Bobacka, 2024, Electrochim. Acta, 504, 144951 (https://doi.org/10.1016/j.electacta.2024.144951)
+
+ Parameters
+ ----------
+ tests: List[|KramersKronigResult|]
+ The test results to evaluate.
+
+ lower_limit: int, optional
+ The lower limit to enforce if this value is greater than zero.
+ If this value is set to zero, then the lower limit is automatically estimated.
+ If this value is less than zero, then no lower limit is enforced.
+
+ upper_limit: int, optional
+ The upper limit to enforce if this value is greater than zero.
+ If this value is set to zero, then the upper limit is automatically estimated.
+ If this value is less than zero, then no upper limit is enforced.
+
+ limit_delta: int, optional
+ Alternative way of defining the upper limit as lower limit + delta.
+ Only used if the value is greater than zero.
+
+ threshold: float, optional
+ The threshold for the mean distance between curvature sign changes.
+ This value is used when estimating the upper limit.
+
+ Returns
+ -------
+ Tuple[int, int]
+ The suggested lower and upper limits.
+ """
+ manually_defined_lower_limit: bool = lower_limit > 0
+ manually_defined_upper_limit: bool = upper_limit > 0
+
+ if manually_defined_lower_limit and manually_defined_upper_limit:
+ if limit_delta > 0:
+ return (
+ max((lower_limit, tests[0].num_RC)),
+ min((lower_limit + limit_delta, upper_limit, tests[-1].num_RC)),
+ )
+
+ return (
+ max((lower_limit, tests[0].num_RC)),
+ min((upper_limit, tests[-1].num_RC)),
+ )
+
+ f: Frequencies = tests[0].get_frequencies()
+ x: NDArray[float64]
+ if tests[0].test == "complex":
+ x = array([t.num_RC for t in tests], dtype=float64)
+ else:
+ x = array([t.num_RC for t in tests if t.num_RC <= len(f)], dtype=float64)
+
+ min_x: int = int(min(x))
+ max_x: int = int(max(x))
+
+ y: NDArray[float64] = log([t.pseudo_chisqr for t in tests if t.num_RC <= max_x])
+ possibly_single_resistor_or_capacitor: bool = (
+ min_x == 2 and (diff(x) == 1).all() and (y[:5] < -2).all()
+ )
+
+ if not manually_defined_lower_limit:
+ if possibly_single_resistor_or_capacitor:
+ lower_limit = min_x
+ else:
+ lower_limit, max_x, _ = _approximate_transition_and_end_point(x, y)
+
+ lower_limit = max((min_x, lower_limit))
+ if manually_defined_upper_limit:
+ lower_limit = max((min_x, lower_limit - 1))
+
+ if not manually_defined_upper_limit:
+ if possibly_single_resistor_or_capacitor:
+ upper_limit = min((max_x, len(f)))
+ else:
+ mean_distances: Dict[int, float] = suggest_num_RC_method_5(
+ tests,
+ lower_limit=lower_limit,
+ upper_limit=0,
+ relative_scores=False,
+ )
+
+ for upper_limit, value in reversed(mean_distances.items()):
+ if upper_limit <= max_x and value >= threshold:
+ upper_limit = min(
+ (
+ max_x,
+ max((lower_limit + (limit_delta or 1), upper_limit)),
+ )
+ )
+ break
+ else:
+ upper_limit = min((max_x, lower_limit + (limit_delta or 1)))
+
+ if upper_limit <= lower_limit:
+ upper_limit = min((max_x, lower_limit + 1))
+
+ if upper_limit <= lower_limit:
+ if upper_limit >= max_x:
+ lower_limit = max((min_x, upper_limit - (limit_delta or 1)))
+ else:
+ upper_limit = min((max_x, lower_limit + (limit_delta or 1)))
+
+ if upper_limit <= lower_limit:
+ raise ValueError(f"Expected {lower_limit=} < {upper_limit=}")
+
+ if (
+ not manually_defined_lower_limit
+ and lower_limit > min_x
+ and min(tests, key=lambda t: t.num_RC).num_RC < lower_limit
+ ):
+ best_fit_below_lower_limit: KramersKronigResult = min(
+ [t for t in tests if t.num_RC < lower_limit],
+ key=lambda t: t.pseudo_chisqr,
+ )
+ lower_limit_fit: KramersKronigResult = [
+ t for t in tests if t.num_RC == lower_limit
+ ][0]
+ if best_fit_below_lower_limit.pseudo_chisqr < lower_limit_fit.pseudo_chisqr:
+ lower_limit = best_fit_below_lower_limit.num_RC
+
+ if limit_delta > 0:
+ upper_limit = min((lower_limit + limit_delta, max_x))
+
+ return (lower_limit, upper_limit)
+
+
+def _choose_methods(
+ tests: List[KramersKronigResult],
+ lower_limit: int,
+ upper_limit: int,
+ limit_delta: int,
+ methods: List[int],
+ **kwargs,
+) -> Tuple[Dict[int, Callable], int, int]:
+ lower_limit, upper_limit = suggest_num_RC_limits(
+ tests,
+ lower_limit,
+ upper_limit,
+ limit_delta,
+ )
+ if lower_limit >= upper_limit:
+ raise ValueError(f"Expected {lower_limit=} < {upper_limit=}")
+
+ algorithms: Dict[int, Callable] = {
+ 1: lambda: suggest_num_RC_method_1(
+ tests,
+ lower_limit=lower_limit,
+ upper_limit=upper_limit,
+ mu_criterion=kwargs.get("mu_criterion", 0.85),
+ beta=kwargs.get("beta", 0.75),
+ ),
+ 2: lambda: suggest_num_RC_method_2(
+ tests,
+ lower_limit=lower_limit,
+ upper_limit=upper_limit,
+ ),
+ 3: lambda: suggest_num_RC_method_3(
+ tests,
+ lower_limit=lower_limit,
+ upper_limit=upper_limit,
+ ),
+ 4: lambda: suggest_num_RC_method_4(
+ tests,
+ lower_limit=lower_limit,
+ upper_limit=upper_limit,
+ ),
+ 5: lambda: suggest_num_RC_method_5(
+ tests,
+ lower_limit=lower_limit,
+ upper_limit=upper_limit,
+ ),
+ 6: lambda: suggest_num_RC_method_6(
+ tests,
+ lower_limit=lower_limit,
+ upper_limit=upper_limit,
+ ),
+ }
+
+ selection: Dict[int, Callable] = {
+ k: v for k, v in algorithms.items() if k in methods
+ }
+ if len(selection) == 0:
+ if len(methods) == 0:
+ raise ValueError(f"Unsupported suggestion method(s): {methods=}")
+ selection = algorithms
+
+ return (selection, lower_limit, upper_limit)
+
+
+def _suggest_using_mean(
+ tests: List[KramersKronigResult],
+ methods: Dict[int, Callable],
+) -> Tuple[KramersKronigResult, Dict[int, float]]:
+ """
+ Take the highest-ranking test result from each method and then pick the
+ test result closest to the mean number of RC elements.
+ """
+ total_scores: Dict[int, float] = {t.num_RC: 0.0 for t in tests}
+ suggestions: List[int] = []
+
+ m: int
+ func: Callable
+ for m, func in methods.items():
+ d: Dict[int, float] = func()
+ if not isinstance(d, dict):
+ raise TypeError(f"Expected a dictionary instead of {d=}")
+ elif len(d) == 0:
+ continue
+
+ if not all(map(lambda k: _is_integer(k), d.keys())):
+ raise TypeError(f"Expected all keys to be integers instead of {d.keys()=}")
+ elif not all(map(lambda k: k > 0, d.keys())):
+ raise ValueError(
+ f"Expected all keys to be integers greater than zero instead of {d.keys()=}"
+ )
+ elif not all(map(lambda v: _is_floating(v), d.values())):
+ raise TypeError(
+ f"Expected all values to be floats instead of {d.values()=}"
+ )
+ elif not all(map(lambda v: 0.0 <= v <= 1.0, d.values())):
+ raise ValueError(
+ f"Expected all values to be floats in the range [0.0, 1.0] instead of {d.values()=}"
+ )
+
+ suggestions.append(sorted(d.items(), key=lambda kv: kv[1], reverse=True)[0][0])
+
+ i: int
+ for i in range(min(suggestions), max(suggestions) + 1):
+ total_scores[i] += 1.0 * suggestions.count(i)
+
+ num_RC: int = int(round(mean(suggestions)))
+
+ return (
+ sorted(
+ tests,
+ key=lambda t: (abs(t.num_RC - num_RC), t.pseudo_chisqr),
+ )[0],
+ total_scores,
+ )
+
+
+def _suggest_using_ranking(
+ tests: List[KramersKronigResult],
+ methods: Dict[int, Callable],
+) -> Tuple[KramersKronigResult, Dict[int, float]]:
+ """
+ Assign scores to the test results after ranking them according to different
+ approaches. Pick the overall highest-scoring test result.
+ """
+ total_scores: Dict[int, float] = {t.num_RC: 0.0 for t in tests}
+ a: float = 1.0
+ b: float = 1.0
+
+ m: int
+ func: Callable
+ for m, func in methods.items():
+ d: Dict[int, float] = func()
+ if not isinstance(d, dict):
+ raise TypeError(f"Expected a dictionary instead of {d=}")
+ elif len(d) == 0:
+ continue
+
+ if not all(map(lambda k: _is_integer(k), d.keys())):
+ raise TypeError(f"Expected all keys to be integers instead of {d.keys()=}")
+ elif not all(map(lambda k: k > 0, d.keys())):
+ raise ValueError(
+ f"Expected all keys to be integers greater than zero instead of {d.keys()=}"
+ )
+ elif not all(map(lambda v: _is_floating(v), d.values())):
+ raise TypeError(
+ f"Expected all values to be floats instead of {d.values()=}"
+ )
+ elif not all(map(lambda v: 0.0 <= v <= 1.0, d.values())):
+ raise ValueError(
+ f"Expected all values to be floats in the range [0.0, 1.0] instead of {d.values()=}"
+ )
+
+ i: int
+ num_RC: int
+ score: float
+ for i, (num_RC, score) in enumerate(
+ sorted(d.items(), key=lambda kv: kv[1], reverse=True)
+ ):
+ total_scores[num_RC] += a * exp(-b * i)
+
+ return (
+ sorted(
+ tests,
+ key=lambda t: (total_scores.get(t.num_RC, 0.0), -log(t.pseudo_chisqr)),
+ reverse=True,
+ )[0],
+ total_scores,
+ )
+
+
+def _suggest_using_sum(
+ tests: List[KramersKronigResult],
+ methods: Dict[int, Callable],
+) -> Tuple[KramersKronigResult, Dict[int, float]]:
+ """
+ Each method returns relative scores (0.0 to 1.0) that are added together.
+ Overlapping suggestions end up with higher total scores and the highest-
+ scoring result is chosen.
+ """
+ total_scores: Dict[int, float] = {t.num_RC: 0.0 for t in tests}
+
+ m: int
+ func: Callable
+ for m, func in methods.items():
+ d: Dict[int, float] = func()
+ if not isinstance(d, dict):
+ raise TypeError(f"Expected a dictionary instead of {d=}")
+ elif len(d) == 0:
+ continue
+
+ if not all(map(lambda k: _is_integer(k), d.keys())):
+ raise TypeError(f"Expected all keys to be integers instead of {d.keys()=}")
+ elif not all(map(lambda k: k > 0, d.keys())):
+ raise ValueError(
+ f"Expected all keys to be integers greater than zero instead of {d.keys()=}"
+ )
+ elif not all(map(lambda v: _is_floating(v), d.values())):
+ raise TypeError(
+ f"Expected all values to be floats instead of {d.values()=}"
+ )
+ elif not all(map(lambda v: 0.0 <= v <= 1.0, d.values())):
+ raise ValueError(
+ f"Expected all values to be floats in the range [0.0, 1.0] instead of {d.values()=}"
+ )
+
+ i: int
+ num_RC: int
+ score: float
+ for i, (num_RC, score) in enumerate(
+ sorted(d.items(), key=lambda kv: kv[1], reverse=True)
+ ):
+ total_scores[num_RC] += score
+
+ return (
+ sorted(
+ tests,
+ key=lambda t: (total_scores.get(t.num_RC, 0.0), -log(t.pseudo_chisqr)),
+ reverse=True,
+ )[0],
+ total_scores,
+ )
+
+
+def _suggest_using_default(
+ tests: List[KramersKronigResult],
+ lower_limit: int,
+ upper_limit: int,
+ limit_delta: int,
+ **kwargs,
+) -> Tuple[KramersKronigResult, Dict[int, float], int, int]:
+ lower_limit, upper_limit = suggest_num_RC_limits(
+ tests,
+ lower_limit,
+ upper_limit,
+ limit_delta,
+ )
+ if lower_limit >= upper_limit:
+ raise ValueError(f"Expected {lower_limit=} < {upper_limit=}")
+
+ f: Frequencies = tests[0].get_frequencies()
+ subdivided_frequencies: Frequencies
+ if "subdivided_frequencies" in kwargs:
+ subdivided_frequencies = kwargs["subdivided_frequencies"]
+ else:
+ subdivided_frequencies = subdivide_frequencies(f)
+
+ curvatures: Dict[int, NDArray[float64]]
+ if "curvatures" in kwargs:
+ curvatures = kwargs["curvatures"]
+ else:
+ circuits: Dict[int, Circuit] = {t.num_RC: t.circuit for t in tests}
+ curvatures = {
+ num_RC: calculate_curvatures(
+ circuit.get_impedances(subdivided_frequencies)
+ )
+ for num_RC, circuit in circuits.items()
+ }
+
+ num_sign_changes: Dict[int, float] = suggest_num_RC_method_4(
+ tests=tests,
+ lower_limit=min(tests, key=lambda t: t.num_RC).num_RC,
+ upper_limit=max(tests, key=lambda t: t.num_RC).num_RC,
+ offset_factor=0.0,
+ relative_scores=False,
+ subdivided_frequencies=subdivided_frequencies,
+ curvatures=curvatures,
+ )
+ norms: Dict[int, float] = suggest_num_RC_method_3(
+ tests,
+ lower_limit=lower_limit,
+ upper_limit=upper_limit,
+ relative_scores=False,
+ subdivided_frequencies=subdivided_frequencies,
+ curvatures=curvatures,
+ )
+ mean_distances: Dict[int, float] = suggest_num_RC_method_5(
+ tests,
+ lower_limit=lower_limit,
+ upper_limit=upper_limit,
+ relative_scores=False,
+ subdivided_frequencies=subdivided_frequencies,
+ curvatures=curvatures,
+ )
+
+ tests = [t for t in tests if lower_limit <= t.num_RC <= upper_limit]
+ log_pseudo_chisqrs: Dict[int, float] = {
+ t.num_RC: log(t.pseudo_chisqr) for t in tests
+ }
+
+ # Try to whittle down the num_RC to suggest based on minimizing the number
+ # of sign changes among the curvatures. If that doesn't provide a single
+ # option, then take the options and maximize the mean distance between
+ # sign changes among the curvatures. If that still doesn't provide a single
+ # option, then take the remaining options and pick the option with the best
+ # fit.
+ modified_scores: Dict[int, float] = {
+ num_RC: num_sign_changes[num_RC] for num_RC in mean_distances.keys()
+ }
+ offset_factor: float = 0.1
+ top_candidates: List[int] = []
+
+ offsets: Dict[int, float]
+ invert: bool
+ for offsets, invert in (
+ ({}, False),
+ (norms, False),
+ (mean_distances, True),
+ (log_pseudo_chisqrs, False),
+ ):
+ if len(offsets) > 0:
+ min_value: float = min(offsets.values())
+ max_value: float = max(offsets.values()) - min_value
+
+ if max_value > 0.0:
+ num_RC: int
+ value: float
+ for num_RC, value in offsets.items():
+ if num_RC not in modified_scores:
+ continue
+
+ value = (value - min_value) / max_value
+ if invert:
+ value = 1.0 - value
+
+ modified_scores[num_RC] += offset_factor * value
+
+ top_candidates.clear()
+
+ min_score: float = min(modified_scores.values())
+
+ num_RC: int
+ score: float
+ for num_RC, score in modified_scores.items():
+ if isclose(score, min_score):
+ top_candidates.append(num_RC)
+
+ if len(top_candidates) == 1:
+ break
+
+ if len(top_candidates) > 1:
+ # If by some chance there are still two or more options,
+ # then pick the lowest num_RC among those options.
+ top_candidates.sort()
+ for num_RC in top_candidates[1:]:
+ modified_scores[num_RC] += 1e-4
+ elif len(top_candidates) == 0:
+ raise NotImplementedError()
+
+ relative_scores: Dict[int, float]
+ min_score: float = min(modified_scores.values())
+ max_score = max(modified_scores.values()) - min_score
+ if max_score > 0.0:
+ relative_scores = {
+ num_RC: 1.0 - (score - min_score) / max_score
+ for num_RC, score in modified_scores.items()
+ }
+ else:
+ relative_scores = {num_RC: 1.0 for num_RC in modified_scores.keys()}
+
+ suggested_test: KramersKronigResult = sorted(
+ tests,
+ key=lambda t: relative_scores.get(t.num_RC, 0.0),
+ reverse=True,
+ )[0]
+
+ # In some cases there may be a lower num_RC that offers a better fit.
+ # E.g., there may be a hump near the lower limit of the num_RC range
+ # where the optimal num_RC should exist.
+ suggested_log_pseudo_chisqr: float64 = log(suggested_test.pseudo_chisqr)
+
+ log_pseudo_chisqr: float
+ for num_RC, log_pseudo_chisqr in sorted(
+ log_pseudo_chisqrs.items(),
+ key=lambda kv: kv[1],
+ ):
+ if (
+ num_RC < suggested_test.num_RC
+ and log_pseudo_chisqr < suggested_log_pseudo_chisqr
+ and num_sign_changes[num_RC] <= num_sign_changes[suggested_test.num_RC]
+ ):
+ suggested_test = [t for t in tests if t.num_RC == num_RC][0]
+ break
+
+ return (suggested_test, relative_scores, lower_limit, upper_limit)
+
+
def suggest_num_RC(
    tests: List[KramersKronigResult],
    lower_limit: int = 0,
    upper_limit: int = 0,
    limit_delta: int = 0,
    methods: Optional[List[int]] = None,
    use_mean: bool = False,
    use_ranking: bool = False,
    use_sum: bool = False,
    **kwargs,
) -> Tuple[KramersKronigResult, Dict[int, float], int, int]:
    """
    Suggest the optimal number of RC elements to use as part of the linear Kramers-Kronig test by applying one or more of the following methods:

    - 1: |mu|-criterion (Schönleber et al., 2014).
    - 2: The norm of the fitted variables (Plank et al., 2022).
    - 3: The norm of the curvatures across the fitted impedance spectrum (Plank et al., 2022).
    - 4: The number of sign changes across the curvatures of the fitted impedance spectrum (Plank et al., 2022).
    - 5: The mean distance between sign changes across the curvatures of the fitted impedance spectrum (Yrjänä and Bobacka, 2024).
    - 6: The apex of a |log sum abs tau R| (or |log sum abs tau C|) versus the number of RC elements (Yrjänä and Bobacka, 2024).

    If multiple methods are used, then one of several approaches can be used to determine which number of RC elements to suggest:

    - Each method suggests a number of RC elements and the mean is chosen.
    - Each method ranks the different numbers of RC elements, exponentially decreasing points are assigned based on rank, the points assigned by each method are summed up, and the highest-scoring number of RC elements is chosen.
    - Each method returns a relative score from 0.0 to 1.0 (worst to best), the relative scores are added up, and the highest-scoring number of RC elements is chosen.

    If no methods are chosen, then the default approach is used:

    - Use method 4 to obtain an initial list of candidates.
    - Use method 3 to reduce the list of candidates.
    - Use method 5 to reduce the list of candidates, if necessary.
    - Use |pseudo chi-squared| to reduce the list of candidates, if necessary.
    - Try to find a lower number of RC elements with a lower |pseudo chi-squared| and an equal number or fewer sign changes among the curvatures.

    If the lower and/or upper limit is not specified, then |suggest_num_RC_limits| is used to estimate the limit(s).

    Returns a tuple containing the following:

    - The |KramersKronigResult| corresponding to the mean of the suggested number of RC elements.
    - A dictionary that maps the number of RC elements to their corresponding scores.
    - The lower limit for the number of RC elements to consider.
    - The upper limit for the number of RC elements to consider.

    References:

    - M. Schönleber, D. Klotz, and E. Ivers-Tiffée, 2014, Electrochim. Acta, 131, 20-27 (https://doi.org/10.1016/j.electacta.2014.01.034)
    - C. Plank, T. Rüther, and M.A. Danzer, 2022, 2022 International Workshop on Impedance Spectroscopy (IWIS), 1-6, (https://doi.org/10.1109/IWIS57888.2022.9975131)
    - V. Yrjänä and J. Bobacka, 2024, Electrochim. Acta, 504, 144951 (https://doi.org/10.1016/j.electacta.2024.144951)

    Parameters
    ----------
    tests: List[|KramersKronigResult|]
        The test results to evaluate.

    lower_limit: int, optional
        See |suggest_num_RC_limits| for details.

    upper_limit: int, optional
        See |suggest_num_RC_limits| for details.

    limit_delta: int, optional
        See |suggest_num_RC_limits| for details.

    methods: Optional[List[int]], optional
        A list of integers corresponding to the supported methods.

    use_mean: bool, optional
        If true, then the mean value of the number of RC elements suggested by each of the selected methods is chosen.

    use_ranking: bool, optional
        If true, then each selected method ranks the numbers of RC elements, a score is assigned based on ranking, and the highest-scoring number of RC elements is chosen.

    use_sum: bool, optional
        If true, then the scores returned by each of the selected methods are summed up and the highest-scoring number of RC elements is chosen.

    **kwargs
        Keyword arguments are passed on to the underlying methods.

    Returns
    -------
    Tuple[|KramersKronigResult|, Dict[int, float], int, int]
    """
    if not isinstance(tests, list):
        raise TypeError(f"Expected a list instead of {tests=}")
    # Work on a copy that is sorted by an ascending number of RC elements.
    tests = sorted(tests, key=lambda t: t.num_RC)

    if not _is_integer(lower_limit):
        raise TypeError(f"Expected an integer instead of {lower_limit=}")
    elif not (lower_limit >= 0):
        raise ValueError(
            f"Expected an integer greater than or equal to zero instead of {lower_limit=}"
        )

    if not _is_integer(upper_limit):
        raise TypeError(f"Expected an integer instead of {upper_limit=}")
    elif upper_limit < 0:
        # A negative upper limit is interpreted as being relative to the
        # highest number of RC elements among the provided test results.
        upper_limit = tests[-1].num_RC + upper_limit

    if not _is_integer(limit_delta):
        raise TypeError(f"Expected an integer instead of {limit_delta=}")

    if methods is None:
        methods = []
    elif not _is_integer_list(methods):
        raise TypeError(f"Expected None or a list of integers instead of {methods=}")

    if not _is_boolean(use_mean):
        raise TypeError(f"Expected a boolean instead of {use_mean=}")
    elif not _is_boolean(use_ranking):
        raise TypeError(f"Expected a boolean instead of {use_ranking=}")
    elif not _is_boolean(use_sum):
        raise TypeError(f"Expected a boolean instead of {use_sum=}")
    elif sum((use_sum, use_mean, use_ranking)) > 1:
        # The three combination approaches are mutually exclusive.
        raise ValueError(
            f"Only one way of combining suggestions can be active at a time instead of {use_mean=}, {use_ranking=}, and {use_sum=}"
        )

    # Dispatch: one or more methods together with a combination approach (or a
    # single method on its own) uses the chosen methods directly, whereas no
    # methods at all falls back on the default approach described in the
    # docstring.
    selection: Dict[int, Callable]
    if (any((use_mean, use_ranking, use_sum)) and len(methods) > 0) or len(
        methods
    ) == 1:
        selection, lower_limit, upper_limit = _choose_methods(
            tests,
            lower_limit,
            upper_limit,
            limit_delta,
            methods,
            **kwargs,
        )

        if use_sum:
            suggested_test, total_scores = _suggest_using_sum(tests, selection)
        elif use_mean:
            suggested_test, total_scores = _suggest_using_mean(tests, selection)
        elif use_ranking:
            suggested_test, total_scores = _suggest_using_ranking(tests, selection)
        elif len(methods) == 1:
            # A single method without an explicit combination approach is
            # treated as if use_sum had been specified.
            suggested_test, total_scores = _suggest_using_sum(tests, selection)
        else:
            raise NotImplementedError()

    elif len(methods) == 0:
        suggested_test, total_scores, lower_limit, upper_limit = _suggest_using_default(
            tests,
            lower_limit,
            upper_limit,
            limit_delta,
            **kwargs,
        )
    elif len(methods) > 1:
        raise ValueError(
            "Multiple methods for suggesting the optimal number of RC elements have been chosen, but the manner in which the scores are combined has not been chosen!"
        )
    else:
        raise NotImplementedError()

    return (
        suggested_test,
        total_scores,
        lower_limit,
        upper_limit,
    )
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/method_1.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/method_1.py
new file mode 100644
index 0000000..dc381c9
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/method_1.py
@@ -0,0 +1,354 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from numpy import (
+ array,
+ ceil,
+ float64,
+ inf,
+ isclose,
+ isnan,
+ log10 as log,
+ nan,
+)
+from numpy.typing import NDArray
+from pyimpspec.analysis.kramers_kronig.result import KramersKronigResult
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.typing.helpers import (
+ Dict,
+ List,
+ Tuple,
+ _is_floating,
+)
+from .utility.mu import _calculate_mu
+from .utility.common import (
+ _generate_pseudo_chisqr_offsets,
+ _truncate_circuits,
+)
+from .utility.logistic import (
+ _logistic_derivative,
+ _logistic_function,
+)
+
+
def _calculate_score(
    circuit: Circuit,
    pseudo_chisqr: float,
    mu_criterion: float = 0.85,
    beta: float = 0.75,
) -> float:
    r"""
    Score a fitted circuit based on its |mu| and |pseudo chi-squared| values together with the provided |mu crit|:

    :math:`S = \frac{-log{\chi^2_{\rm ps}}}{\left(\mu_{\rm crit} - \mu\right)^\beta}`

    |numpy.nan| is returned when |mu| is |numpy.nan| or when :math:`\mu \geq \mu_{\rm crit}`.
    This score is not part of the original algorithm proposed by Schönleber et al. (2014).
    Rather, it is a measure for avoiding the premature terminations that can occur in the original algorithm if, e.g., |mu| fluctuates too much at low numbers of RC elements.
    Circuits with a good fit (i.e., a low |pseudo chi-squared| value) are rewarded while large differences between |mu crit| and |mu| are penalized.
    The value of |beta| is determined heuristically and 0.75 seems to work well.

    References:

    - M. Schönleber, D. Klotz, and E. Ivers-Tiffée, 2014, Electrochim. Acta, 131, 20-27 (https://doi.org/10.1016/j.electacta.2014.01.034)

    Parameters
    ----------
    circuit: Circuit
        The circuit that was fitted as part of the Kramers-Kronig test.

    pseudo_chisqr: float
        The pseudo chi-squared corresponding to the fitted circuit.

    mu_criterion: float, optional
        The |mu crit| to apply.

    beta: float, optional
        The exponent used in the denominator.

    Returns
    -------
    float
    """
    mu: float = _calculate_mu(circuit)
    if isnan(mu):
        return nan

    if mu >= mu_criterion:
        return nan

    # Penalize large differences between mu_criterion and mu.
    penalty: float = (mu_criterion - mu) ** beta

    return -log(pseudo_chisqr) / penalty
+
+
def _fit_logistic_function(tests: List[KramersKronigResult]) -> Tuple[float, ...]:
    """
    Fit a logistic function to |mu| as a function of the number of RC elements.

    NaN |mu| values are replaced with 1.0 before fitting. The amplitude and
    vertical offset of the logistic function are fixed based on the extrema of
    the |mu| values, so only the two remaining parameters are fitted.
    """
    from scipy.optimize import curve_fit

    x: NDArray[float64] = array([t.num_RC for t in tests], dtype=float64)

    mu_values: List[float] = []
    for t in tests:
        mu: float = _calculate_mu(t.circuit)
        mu_values.append(1.0 if isnan(mu) else mu)

    y: NDArray[float64] = array(mu_values)

    # Amplitude and vertical offset are fixed rather than fitted.
    a: float = max(y) - min(y)
    d: float = max(y) - a

    # The midpoint is constrained to lie within the range of x values.
    b, c = curve_fit(
        lambda x, b, c: _logistic_function(x, a, b, c, d),
        x,
        y,
        p0=(1.0, 10),
        bounds=(
            [-inf, min(x) + 1],
            [inf, max(x) - 1],
        ),
    )[0]

    return (a, b, c, d)
+
+
def suggest(
    tests: List[KramersKronigResult],
    lower_limit: int = 0,
    upper_limit: int = 0,
    mu_criterion: float = 0.85,
    beta: float = 0.75,
    relative_scores: bool = True,
) -> Dict[int, float]:
    """
    The value |mu| describes the ratio of the total mass of negative resistances to the total mass of positive resistances:

    :math:`\\mu = 1 - \\frac{\\Sigma_{R_k < 0} |R_k|}{\\Sigma_{R_k \\geq 0} |R_k|}`

    |mu| ranges from 0.0 to 1.0 and these extremes represent overfitting and underfitting, respectively.
    Overfitting manifests as an oscillating fitted impedance spectrum, which is made possible by a mix of positive and negative resistances.
    The number of RC elements is incremented until the corresponding |mu| drops below the threshold |mu crit|.

    The first modification is to adapt the equation above for use with validation of immittance data in the admittance representation:

    :math:`\\mu = 1 - \\frac{\\Sigma_{C_k < 0} |C_k|}{\\Sigma_{C_k \\geq 0} |C_k|}`

    The denominator can be less than one, so the calculated values are clamped to only range from 0.0 to 1.0.

    The second modification is that the iteration is done in reverse (i.e., by decrementing the number of RC elements instead of incrementing it) since there can be significant fluctuation of |mu| at low numbers of RC elements, which can cause the iterative process to stop too early.

    The third modification is to calculate an additional score, :math:`S`, as follows:

    :math:`S = \\frac{-\\log{\\chi^2_{\\rm ps}}}{{\\left(\\mu_{\\rm crit} - \\mu\\right)}^{\\beta}}`

    The exponent |beta| is determined heuristically and a value of 0.75 seems to work well.
    Only |mu| values less than |mu crit| are considered when calculating :math:`S`.
    The use of this score helps to deal with the fluctuation that affects the use of |mu| directly.
    If |beta| is set to zero, then the second and third modification are skipped.

    If |mu| is negative, then an alternative approach is used whereby a logistic function is fitted to a plot of |mu| versus the number of RC elements.
    The intercept (rounded up) of the slope at the midpoint of that function and a line at the highest point of the function is used to pick the optimal number of RC elements.

    The returned dictionary maps the number of RC elements to a score ranging from 0.0 to 1.0 with the latter representing the highest-ranking candidate.

    References:

    - M. Schönleber, D. Klotz, and E. Ivers-Tiffée, 2014, Electrochim. Acta, 131, 20-27 (https://doi.org/10.1016/j.electacta.2014.01.034)
    - V. Yrjänä and J. Bobacka, 2024, Electrochim. Acta, 504, 144951 (https://doi.org/10.1016/j.electacta.2024.144951)

    Parameters
    ----------
    tests: List[|KramersKronigResult|]
        The test results to evaluate.

    lower_limit: int, optional
        The lower limit to enforce for the number of RC elements.
        If this value is less than one, then no limit is enforced.
        If both the lower and upper limit are greater than zero, then the lower limit must have a smaller value than the upper limit.

    upper_limit: int, optional
        The upper limit to enforce for the number of RC elements.
        If this value is less than one, then no limit is enforced.
        If both the lower and upper limit are greater than zero, then the upper limit must have a greater value than the lower limit.

    mu_criterion: float, optional
        The |mu crit| to apply.
        Schönleber et al. (2014) recommended 0.85 based on their experiences.
        If a value less than zero is provided, then the alternative approach based on fitting a logistic function to pick the optimal number of RC elements is used.

    beta: float, optional
        Exponent used to tweak the influence of the proximity of |mu| to |mu crit| when calculating scores.
        If set to zero, then the iteration direction is not reversed and the score :math:`S` is not calculated.

    relative_scores: bool, optional
        Return relative scores ranging from 0.0 to 1.0 (from worst to best) rather than the raw values.

    Returns
    -------
    Dict[int, float]
    """
    if not _is_floating(mu_criterion):
        raise TypeError(f"Expected a float instead of {mu_criterion=}")
    elif isclose(mu_criterion, 0.0):
        raise ValueError(f"Expected {mu_criterion=} != 0.0")
    elif not ((0.0 < mu_criterion < 1.0) or (-1.0 <= mu_criterion < 0.0)):
        # NOTE: the message previously claimed -1.0 was excluded even though
        # the check above accepts it; the message now matches the check.
        raise ValueError(
            f"Expected (0.0 < {mu_criterion=} < 1.0) or (-1.0 <= {mu_criterion=} < 0.0)"
        )

    if not _is_floating(beta):
        raise TypeError(f"Expected a float instead of {beta=}")
    elif beta < 0.0:
        raise ValueError(
            f"Expected a value greater than or equal to zero instead of {beta=}"
        )

    circuits: Dict[int, Circuit] = {t.num_RC: t.circuit for t in tests}
    circuits = _truncate_circuits(circuits, lower_limit, upper_limit)
    scores: Dict[int, float]

    # Original approach
    if isclose(beta, 0.0):
        if not relative_scores:
            # NaN mu values are replaced with 1.0 while building the scores.
            scores = {
                num_RC: (mu if not isnan(mu) else 1.0)
                for num_RC, mu in {
                    num_RC: _calculate_mu(circuit)
                    for num_RC, circuit in circuits.items()
                }.items()
            }

            # Defensive check: NaN values were already replaced above, so this
            # should never trigger.
            if any(map(isnan, scores.values())):
                raise ValueError(
                    f"Expected values that are not NaN instead of {scores=}"
                )

            return {
                num_RC: (value if not isnan(value) else 1.0)
                for num_RC, value in scores.items()
            }

        scores = {
            num_RC: (mu if not (isnan(mu) or mu >= mu_criterion) else nan)
            for num_RC, mu in {
                num_RC: _calculate_mu(circuit) for num_RC, circuit in circuits.items()
            }.items()
        }
        if all(map(isnan, scores.values())):
            # None of the mu values are below the chosen threshold so pick the
            # highest number of RC elements.
            return {
                num_RC: (1.0 if num_RC == max(circuits.keys()) else 0.0)
                for num_RC in circuits
            }

        # Pick the first instance where the mu value drops below the chosen
        # threshold.
        lowest_num_RC: int = min(
            (num_RC for num_RC, value in scores.items() if not isnan(value))
        )
        return {
            num_RC: (1.0 if num_RC == lowest_num_RC else 0.0)
            for num_RC in scores.keys()
        }

    pseudo_chisqrs: Dict[int, float] = {
        t.num_RC: t.pseudo_chisqr for t in tests if t.num_RC in circuits
    }

    # Modified approach 1: fit a logistic function to mu vs. num_RC and pick
    # the num_RC where the midpoint slope intercepts the function's maximum.
    if mu_criterion < 0.0:
        p: Tuple[float, ...] = _fit_logistic_function(tests)
        slope: float = _logistic_derivative(p[2], *p)
        intercept: float = _logistic_function(p[2], *p) - slope * p[2]
        if not relative_scores:
            return {
                num_RC: _logistic_function(num_RC, *p) for num_RC in circuits.keys()
            }

        # Small pseudo chi-squared-based offsets break ties between candidates
        # that are equally far from the target.
        offset_factor: float = 1e-6
        offsets: Dict[int, float] = _generate_pseudo_chisqr_offsets(
            pseudo_chisqrs, factor=offset_factor
        )

        target_num_RC: float = ceil((intercept - (p[0] + p[3])) / (0.0 - slope))
        scores = {
            num_RC: abs(target_num_RC - num_RC) + (offset_factor - offsets[num_RC])
            for num_RC in circuits.keys()
        }
        min_score: float = min(scores.values())
        max_score: float = max(scores.values()) - min_score

        scores = {
            num_RC: 1.0 - (value - min_score) / max_score
            for num_RC, value in scores.items()
        }

        return scores

    # Modified approach 2: score candidates with S (see _calculate_score).
    scores = {
        num_RC: _calculate_score(
            circuit,
            pseudo_chisqrs[num_RC],
            mu_criterion=mu_criterion,
            beta=beta,
        )
        for num_RC, circuit in circuits.items()
    }
    if all(map(isnan, scores.values())):
        # None of the mu values are below the chosen threshold so pick the
        # highest number of RC elements.
        return {
            num_RC: (1.0 if num_RC == max(circuits.keys()) else 0.0)
            for num_RC in circuits
        }

    # Iterate in reverse and pick the highest number of RC elements before
    # the mu values rise above the chosen threshold.
    num_RCs: List[int] = sorted(circuits.keys())
    i: int = len(num_RCs)
    while i > 0:
        i -= 1
        score: float = scores[num_RCs[i]]
        if isnan(score):
            i += 1
            break

    if i >= len(num_RCs):
        i = len(num_RCs) - 1

    non_nan_scores: List[float] = [
        scores[num_RC] for num_RC in num_RCs[i:] if not isnan(scores[num_RC])
    ]
    if len(non_nan_scores) == 0:
        return {num_RC: 0.0 for num_RC in scores.keys()}

    if not relative_scores:
        # Candidates below index i are excluded from consideration.
        return {
            num_RC: (
                (scores[num_RC] if not isnan(scores[num_RC]) else 0.0)
                if j >= i
                else 0.0
            )
            for j, num_RC in enumerate(num_RCs)
        }

    min_score = min(non_nan_scores)
    max_score = max(non_nan_scores) - min_score
    if max_score == 0.0:
        min_score = 0.0
        max_score = max(non_nan_scores)

    return {
        num_RC: float(((scores[num_RC] - min_score) / max_score) if j >= i else 0.0)
        for j, num_RC in enumerate(num_RCs)
    }
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/method_2.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/method_2.py
new file mode 100644
index 0000000..0d4c18c
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/method_2.py
@@ -0,0 +1,139 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+
+from numpy.linalg import norm
+from pyimpspec.analysis.kramers_kronig.result import KramersKronigResult
+from pyimpspec.circuit.base import Element
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.circuit.kramers_kronig import (
+ KramersKronigAdmittanceRC,
+ KramersKronigRC,
+)
+from pyimpspec.typing.helpers import (
+ Dict,
+ List,
+)
+from .utility.common import (
+ _is_admittance_test_circuit,
+ _truncate_circuits,
+)
+
+
def _calculate_zeta(circuit: Circuit) -> float:
    """
    Calculate the norm of the fitted variables divided by the number of RC elements.

    The values are the original fitted variables obtained by solving the set
    of linear equations, which requires mapping some element parameters back
    (e.g., inverting R when validating in the admittance representation).
    """
    admittance: bool = _is_admittance_test_circuit(circuit)
    rc_type = KramersKronigAdmittanceRC if admittance else KramersKronigRC

    theta: List[float] = []
    num_RC: int = 0

    element: Element
    parameters: Dict[str, float]
    for element in circuit.get_elements(recursive=True):
        parameters = element.get_values()
        if isinstance(element, rc_type):
            num_RC += 1

        if "tau" in parameters:
            v = parameters["C"] if admittance else parameters["R"]
        elif "R" in parameters:
            v = (1 / parameters["R"]) if admittance else parameters["R"]
        elif "C" in parameters:
            v = parameters["C"] if admittance else (1 / parameters["C"])
        elif "L" in parameters:
            v = (1 / parameters["L"]) if admittance else parameters["L"]

        theta.append(v)

    return norm(theta) / num_RC
+
+
def suggest(
    tests: List[KramersKronigResult],
    lower_limit: int = 0,
    upper_limit: int = 0,
    relative_scores: bool = True,
) -> Dict[int, float]:
    """
    Suggest the optimal number of RC elements based on the norm of the fitted variables divided by the number of RC elements.
    Both underfitting and overfitting manifest as growing norms, so a minimum of the norm of the fitted variables is expected to coincide with the sought optimum.

    References:

    - C. Plank, T. Rüther, and M.A. Danzer, 2022, 2022 International Workshop on Impedance Spectroscopy (IWIS), 1-6, (https://doi.org/10.1109/IWIS57888.2022.9975131)

    Parameters
    ----------
    tests: List[|KramersKronigResult|]
        The test results to evaluate.

    lower_limit: int, optional
        The smallest number of RC elements to consider.
        Values less than one disable this limit.
        If both limits are greater than zero, then the lower limit must be less than the upper limit.

    upper_limit: int, optional
        The largest number of RC elements to consider.
        Values less than one disable this limit.
        If both limits are greater than zero, then the upper limit must be greater than the lower limit.

    relative_scores: bool, optional
        Return relative scores ranging from 0.0 to 1.0 (from worst to best) rather than the raw values.

    Returns
    -------
    Dict[int, float]
    """
    circuits: Dict[int, Circuit] = _truncate_circuits(
        {t.num_RC: t.circuit for t in tests},
        lower_limit,
        upper_limit,
    )

    zetas: Dict[int, float] = {}
    for num_RC, circuit in circuits.items():
        zetas[num_RC] = _calculate_zeta(circuit)

    if not relative_scores:
        return zetas

    # Normalize so that the smallest norm maps to 1.0 and the largest to 0.0.
    lowest: float = min(zetas.values())
    span: float = max(zetas.values()) - lowest
    if span == 0.0:
        return {num_RC: 1.0 for num_RC in zetas}

    return {
        num_RC: float(1.0 - (value - lowest) / span)
        for num_RC, value in zetas.items()
    }
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/method_3.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/method_3.py
new file mode 100644
index 0000000..fab85f6
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/method_3.py
@@ -0,0 +1,131 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+
+from numpy import float64
+from numpy.linalg import norm
+from numpy.typing import NDArray
+from pyimpspec.analysis.kramers_kronig.result import KramersKronigResult
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.typing.aliases import Frequencies
+from pyimpspec.typing.helpers import (
+ Dict,
+ List,
+ Optional,
+)
+from .utility.osculating_circle import calculate_curvatures
+from .utility.common import (
+ _truncate_circuits,
+ subdivide_frequencies,
+)
+
+
def suggest(
    tests: List[KramersKronigResult],
    lower_limit: int = 0,
    upper_limit: int = 0,
    subdivision: int = 4,
    subdivided_frequencies: Optional[Frequencies] = None,
    curvatures: Optional[Dict[int, NDArray[float64]]] = None,
    relative_scores: bool = True,
) -> Dict[int, float]:
    """
    Suggest the optimal number of RC elements based on the norm of the curvatures of the fitted impedance spectrum.
    An osculating circle is used to approximate the curvature at each point of the fitted impedance spectrum, and the sought optimum is expected to coincide with a minimum of the norm of these curvatures.

    By default, each frequency interval is subdivided before the curvatures are determined, which makes the method less likely to favor circuits that produce oscillating impedance spectra.
    Setting ``subdivision = 0`` disables the subdivision and restores the original approach.

    References:

    - C. Plank, T. Rüther, and M.A. Danzer, 2022, 2022 International Workshop on Impedance Spectroscopy (IWIS), 1-6, (https://doi.org/10.1109/IWIS57888.2022.9975131)
    - V. Yrjänä and J. Bobacka, 2024, Electrochim. Acta, 504, 144951 (https://doi.org/10.1016/j.electacta.2024.144951)

    Parameters
    ----------
    tests: List[|KramersKronigResult|]
        The test results to evaluate.

    lower_limit: int, optional
        The smallest number of RC elements to consider.
        Values less than one disable this limit.
        If both limits are greater than zero, then the lower limit must be less than the upper limit.

    upper_limit: int, optional
        The largest number of RC elements to consider.
        Values less than one disable this limit.
        If both limits are greater than zero, then the upper limit must be greater than the lower limit.

    subdivision: int, optional
        The number of additional frequencies to insert into each frequency interval (if greater than zero).

    subdivided_frequencies: Optional[Frequencies], optional
        Previously subdivided frequencies to reuse when multiple methods require subdividing frequencies.

    curvatures: Optional[Dict[int, NDArray[float64]]], optional
        Previously estimated curvatures to reuse when multiple methods require curvatures.

    relative_scores: bool, optional
        Return relative scores ranging from 0.0 to 1.0 (from worst to best) rather than the raw values.

    Returns
    -------
    Dict[int, float]
    """
    # Determine which frequencies to evaluate the fitted circuits at.
    f: Frequencies = tests[0].get_frequencies()
    if subdivided_frequencies is not None:
        f = subdivided_frequencies
    elif subdivision > 0:
        f = subdivide_frequencies(f, subdivision=subdivision)

    circuits: Dict[int, Circuit] = _truncate_circuits(
        {t.num_RC: t.circuit for t in tests},
        lower_limit,
        upper_limit,
    )

    # Reuse cached curvatures where available and estimate the rest.
    cache: Dict[int, NDArray[float64]] = curvatures if curvatures is not None else {}
    scores: Dict[int, float] = {}
    for num_RC, circuit in circuits.items():
        kappas = cache.get(num_RC)
        if kappas is None:
            kappas = calculate_curvatures(circuit.get_impedances(f))
        scores[num_RC] = norm(kappas)

    if not relative_scores:
        return scores

    # Normalize so that the smallest norm maps to 1.0 and the largest to 0.0.
    lowest: float = min(scores.values())
    span: float = max(scores.values()) - lowest
    if span == 0.0:
        return {num_RC: 1.0 for num_RC in scores}

    return {
        num_RC: float(1.0 - (value - lowest) / span)
        for num_RC, value in scores.items()
    }
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/method_4.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/method_4.py
new file mode 100644
index 0000000..4300290
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/method_4.py
@@ -0,0 +1,205 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+
+from numpy import (
+ float64,
+ sign,
+)
+from numpy.typing import NDArray
+from pyimpspec.analysis.kramers_kronig.result import KramersKronigResult
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.typing.aliases import Frequencies
+from pyimpspec.typing.helpers import (
+ Dict,
+ List,
+ Optional,
+ _is_floating,
+)
+from .utility.common import (
+ _generate_pseudo_chisqr_offsets,
+ _truncate_circuits,
+ subdivide_frequencies,
+)
+from .utility.osculating_circle import calculate_curvatures
+
+
+def _count_sign_changes(kappas: NDArray[float64]) -> int:
+ previous_sign: int = sign(kappas[0])
+ n: int = 1
+
+ for i in range(1, len(kappas)):
+ if kappas[i] == 0.0:
+ continue
+
+ current_sign: int = sign(kappas[i])
+ if current_sign != previous_sign:
+ n += 1
+ previous_sign = current_sign
+
+ return n
+
+
+def _suggest(
+ circuits: Dict[int, Circuit],
+ pseudo_chisqrs: Dict[int, float],
+ f: Frequencies,
+ offset_factor: float,
+ lower_limit: int,
+ upper_limit: int,
+ curvatures: Optional[Dict[int, NDArray[float64]]],
+ relative_scores: bool,
+) -> Dict[int, float]:
+ if not _is_floating(offset_factor):
+ raise TypeError(f"Expected a float instead of {offset_factor=}")
+ elif not (0.0 <= offset_factor < 1.0):
+ raise ValueError(
+ f"Expected a value in the range [0.0, 1.0) instead of {offset_factor=}"
+ )
+
+ circuits = _truncate_circuits(circuits, lower_limit, upper_limit)
+
+ pseudo_chisqrs = {
+ num_RC: pseudo_chisqr
+ for num_RC, pseudo_chisqr in pseudo_chisqrs.items()
+ if num_RC in circuits
+ }
+
+ offsets: Dict[int, float] = _generate_pseudo_chisqr_offsets(
+ pseudo_chisqrs,
+ factor=offset_factor,
+ )
+
+ scores: Dict[int, float] = {}
+ if curvatures is not None:
+ for num_RC, circuit in circuits.items():
+ if num_RC in curvatures:
+ scores[num_RC] = (
+ _count_sign_changes(
+ curvatures[num_RC]
+ ) + offsets[num_RC]
+ )
+ else:
+ scores[num_RC] = (
+ _count_sign_changes(
+ calculate_curvatures(
+ circuit.get_impedances(f)
+ )
+ ) + offsets[num_RC]
+ )
+ else:
+ for num_RC, circuit in circuits.items():
+ scores[num_RC] = (
+ _count_sign_changes(
+ calculate_curvatures(
+ circuit.get_impedances(f)
+ )
+ ) + offsets[num_RC]
+ )
+
+ if not relative_scores:
+ return scores
+
+ min_score: float = min(scores.values())
+ max_score: float = max(scores.values()) - min_score
+ if max_score == 0.0:
+ return {num_RC: 1.0 for num_RC in scores.keys()}
+
+ return {
+ num_RC: 1.0 - float((value - min_score) / max_score)
+ for num_RC, value in scores.items()
+ }
+
+
+def suggest(
+ tests: List[KramersKronigResult],
+ lower_limit: int = 0,
+ upper_limit: int = 0,
+ subdivision: int = 4,
+ subdivided_frequencies: Optional[Frequencies] = None,
+ curvatures: Optional[Dict[int, NDArray[float64]]] = None,
+ offset_factor: float = 1e-1,
+ relative_scores: bool = True,
+) -> Dict[int, float]:
+ """
+ Suggest the optimal number of RC elements to use based on the number of sign changes of curvatures of the fitted impedance spectrum.
+ The curvatures at each point of the fitted impedance spectrum are approximated using an osculating circle.
+ An increasing number of sign changes of these curvatures results from oscillations brought on by overfitting.
+ Thus, a minimum of the number of sign changes should coincide with the desired optimum.
+
+ The method is modified by subdividing the frequency intervals, which makes the method less prone to suggesting circuits that produce oscillating impedance spectra.
+ Small offsets are also added to the number of sign changes based on the corresponding pseudo chi-squared values in order to act as tiebreakers in case there are multiple numbers of RC elements that correspond to the same number of sign changes.
+ This modified approach is used by default, but the original approach can be used by setting ``subdivision = 0`` and ``offset_factor = 0.0``.
+
+ References:
+
+ - C. Plank, T. Rüther, and M.A. Danzer, 2022, 2022 International Workshop on Impedance Spectroscopy (IWIS), 1-6, (https://doi.org/10.1109/IWIS57888.2022.9975131)
+ - V. Yrjänä and J. Bobacka, 2024, Electrochim. Acta, 504, 144951 (https://doi.org/10.1016/j.electacta.2024.144951)
+
+ Parameters
+ ----------
+ tests: List[|KramersKronigResult|]
+ The test results to evaluate.
+
+ lower_limit: int, optional
+ The lower limit to enforce for the number of RC elements.
+ If this value is less than one, then no limit is enforced.
+ If both the lower and upper limit are greater than zero, then the lower limit must have a smaller value than the upper limit.
+
+ upper_limit: int, optional
+ The upper limit to enforce for the number of RC elements.
+ If this value is less than one, then no limit is enforced.
+ If both the lower and upper limit are greater than zero, then the upper limit must have a greater value than the lower limit.
+
+ subdivision: int, optional
+ If greater than zero, then that number of additional frequencies are inserted into each frequency interval.
+
+ subdivided_frequencies: Optional[Frequencies], optional
+ Frequencies that have already been subdivided. If multiple methods that require subdividing frequencies will be used, then this provides a means of reusing those frequencies.
+
+ curvatures: Optional[Dict[int, NDArray[float64]]], optional
+ Curvatures that have already been estimated. If multiple methods that require curvatures will be used, then this provides a means of reusing those curvatures.
+
+ offset_factor: float, optional
+ The factor that an offset is multiplied by when it is being added to a number of sign changes.
+ Must be in the range [0.0, 1.0).
+
+ relative_scores: bool, optional
+ Return relative scores ranging from 0.0 to 1.0 (from worst to best) rather than the raw values.
+
+ Returns
+ -------
+ Dict[int, float]
+ """
+ f: Frequencies = tests[0].get_frequencies()
+ if subdivided_frequencies is not None:
+ f = subdivided_frequencies
+ elif subdivision > 0:
+ f = subdivide_frequencies(f, subdivision=subdivision)
+
+ return _suggest(
+ circuits={t.num_RC: t.circuit for t in tests},
+ pseudo_chisqrs={t.num_RC: t.pseudo_chisqr for t in tests},
+ f=f,
+ offset_factor=offset_factor,
+ lower_limit=lower_limit,
+ upper_limit=upper_limit,
+ curvatures=curvatures,
+ relative_scores=relative_scores,
+ )
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/method_5.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/method_5.py
new file mode 100644
index 0000000..6b80d46
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/method_5.py
@@ -0,0 +1,163 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+
+from numpy import (
+ float64,
+ mean,
+)
+from numpy.typing import NDArray
+from pyimpspec.analysis.kramers_kronig.result import KramersKronigResult
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.typing.aliases import Frequencies
+from pyimpspec.typing.helpers import (
+ Dict,
+ List,
+ Optional,
+)
+from .utility.osculating_circle import (
+ _calculate_sign_change_distances,
+ calculate_curvatures,
+)
+from .utility.common import (
+ _truncate_circuits,
+ subdivide_frequencies,
+)
+
+
+def _suggest(
+ circuits: Dict[int, Circuit],
+ f: Frequencies,
+ lower_limit: int,
+ upper_limit: int,
+ curvatures: Optional[Dict[int, NDArray[float64]]],
+ relative_scores: bool,
+) -> Dict[int, float]:
+ circuits = _truncate_circuits(circuits, lower_limit, upper_limit)
+
+ scores: Dict[int, float] = {}
+ if curvatures is not None:
+ for num_RC, circuit in circuits.items():
+ if num_RC in curvatures:
+ scores[num_RC] = mean(
+ _calculate_sign_change_distances(
+ curvatures[num_RC]
+ )
+ )
+ else:
+ scores[num_RC] = mean(
+ _calculate_sign_change_distances(
+ calculate_curvatures(
+ circuit.get_impedances(f)
+ )
+ )
+ )
+ else:
+ for num_RC, circuit in circuits.items():
+ scores[num_RC] = mean(
+ _calculate_sign_change_distances(
+ calculate_curvatures(
+ circuit.get_impedances(f)
+ )
+ )
+ )
+
+ if not relative_scores:
+ return scores
+
+ min_score: float = min(scores.values())
+ max_score: float = max(scores.values()) - min_score
+ if max_score == 0.0:
+ return {num_RC: 1.0 for num_RC in scores.keys()}
+
+ return {
+ num_RC: float((value - min_score) / max_score)
+ for num_RC, value in scores.items()
+ }
+
+
+def suggest(
+ tests: List[KramersKronigResult],
+ lower_limit: int = 0,
+ upper_limit: int = 0,
+ subdivision: int = 4,
+ subdivided_frequencies: Optional[Frequencies] = None,
+ curvatures: Optional[Dict[int, NDArray[float64]]] = None,
+ relative_scores: bool = True,
+) -> Dict[int, float]:
+ """
+ Suggest the optimal number of RC elements to use based on the average distance between sign changes of the curvatures of the fitted impedance spectrum.
+ The curvatures at each point of the fitted impedance spectrum are approximated using an osculating circle.
+ The largest average distance should occur at the lowest number of RC elements, but the optimum coincides with a local maximum at an intermediate number of RC elements.
+ The average distance will tend towards one as the number of RC elements is incremented further.
+
+ References:
+
+ - V. Yrjänä and J. Bobacka, 2024, Electrochim. Acta, 504, 144951 (https://doi.org/10.1016/j.electacta.2024.144951)
+ - C. Plank, T. Rüther, and M.A. Danzer, 2022, 2022 International Workshop on Impedance Spectroscopy (IWIS), 1-6, (https://doi.org/10.1109/IWIS57888.2022.9975131)
+
+ Parameters
+ ----------
+ tests: List[|KramersKronigResult|]
+ The test results to evaluate.
+
+ lower_limit: int, optional
+ The lower limit to enforce for the number of RC elements.
+ If this value is less than one, then no limit is enforced.
+ If both the lower and upper limit are greater than zero, then the lower limit must have a smaller value than the upper limit.
+
+ upper_limit: int, optional
+ The upper limit to enforce for the number of RC elements.
+ If this value is less than one, then no limit is enforced.
+ If both the lower and upper limit are greater than zero, then the upper limit must have a greater value than the lower limit.
+
+ subdivision: int, optional
+ If greater than zero, then that number of additional frequencies are inserted into each frequency interval.
+
+ subdivided_frequencies: Optional[Frequencies], optional
+ Frequencies that have already been subdivided. If multiple methods that require subdividing frequencies will be used, then this provides a means of reusing those frequencies.
+
+ curvatures: Optional[Dict[int, NDArray[float64]]], optional
+ Curvatures that have already been estimated. If multiple methods that require curvatures will be used, then this provides a means of reusing those curvatures.
+
+ relative_scores: bool, optional
+ Return relative scores ranging from 0.0 to 1.0 (from worst to best) rather than the raw values.
+
+ Returns
+ -------
+ Dict[int, float]
+ """
+ f: Frequencies = tests[0].get_frequencies()
+ m: int = len(f)
+ if subdivided_frequencies is not None:
+ f = subdivided_frequencies
+ elif subdivision > 0:
+ f = subdivide_frequencies(f, subdivision=subdivision)
+ n: int = len(f)
+
+ scores: Dict[int, float] = _suggest(
+ circuits={t.num_RC: t.circuit for t in tests},
+ f=f,
+ lower_limit=lower_limit,
+ upper_limit=upper_limit,
+ curvatures=curvatures,
+ relative_scores=relative_scores,
+ )
+
+ return {num_RC: value * m / n for num_RC, value in scores.items()}
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/method_6.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/method_6.py
new file mode 100644
index 0000000..43bc9cc
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/method_6.py
@@ -0,0 +1,156 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+
+from numpy import (
+ float64,
+ fromiter,
+ int64,
+)
+from numpy.typing import NDArray
+from pyimpspec.analysis.kramers_kronig.result import KramersKronigResult
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.typing.helpers import (
+ Dict,
+ List,
+ Tuple,
+)
+from .utility.common import _truncate_circuits
+from .utility.cubic import (
+ _cubicish_function,
+ _fit_cubic_function,
+ _fit_cubicish_function,
+)
+from .utility.tau_var_sum import _calculate_log_sum_abs_tau_var
+
+
+def _approximate_trend(circuits: Dict[int, Circuit]) -> Tuple[NDArray[float64], Tuple[float, ...]]:
+ x: NDArray[int64] = fromiter(
+ (num_RC for num_RC in circuits.keys()),
+ dtype=int64,
+ count=len(circuits),
+ )
+ raw_y: NDArray[float64] = fromiter(
+ map(_calculate_log_sum_abs_tau_var, circuits.values()),
+ dtype=float64,
+ count=len(circuits),
+ )
+
+ num_points: int = 5
+ threshold: float64 = min(raw_y[:num_points])
+
+ n: int
+ y: float64
+ for n, y in enumerate(raw_y):
+ if n < num_points:
+ continue
+ elif y <= threshold:
+ break
+
+ p: Tuple[float64, ...] = _fit_cubic_function(x[:n], raw_y[:n])
+ n += 10
+ p = _fit_cubicish_function(x[:n], raw_y[:n], p0=p)
+
+ return (_cubicish_function(x, *p), p)
+
+
+def _suggest(
+ circuits: Dict[int, Circuit],
+ lower_limit: int,
+ upper_limit: int,
+ relative_scores: bool,
+) -> Dict[int, float]:
+ smooth_y: Dict[int, float64] = {
+ num_RC: v
+ for num_RC, v in zip(
+ circuits.keys(),
+ _approximate_trend(circuits)[0],
+ )
+ }
+
+ if not relative_scores:
+ return smooth_y
+
+ threshold: float64 = smooth_y[min(smooth_y.keys())]
+ scores: Dict[int, float64] = {num_RC: threshold for num_RC in circuits.keys()}
+ circuits = _truncate_circuits(circuits, lower_limit, upper_limit)
+ scores.update(
+ {
+ num_RC: (smooth_y[num_RC] if smooth_y[num_RC] > threshold else threshold)
+ for num_RC in circuits.keys()
+ }
+ )
+
+ min_score: float = min(scores.values())
+ max_score: float = max(scores.values()) - min_score
+ if max_score == 0.0:
+ return {num_RC: 1.0 for num_RC in scores.keys()}
+
+ return {
+ num_RC: float((scores[num_RC] - min_score) / max_score)
+ for num_RC in circuits.keys()
+ }
+
+
+def suggest(
+ tests: List[KramersKronigResult],
+ lower_limit: int = 0,
+ upper_limit: int = 0,
+ relative_scores: bool = True,
+) -> Dict[int, float]:
+ """
+ Suggest the optimal number of RC elements to use based on the approximate position of the apex in a plot of |log sum abs tau R| versus the number of RC elements.
+ If the tests were performed on the admittance representation of the immittance data, then :math:`C_k` is substituted for :math:`R_k`.
+ The sum grows initially as the number of RC elements increases.
+ However, the magnitudes of the fitted :math:`R_k` (or :math:`C_k`) also tend to increase, which causes the magnitudes of the corresponding :math:`C_k` (or :math:`R_k`) to decrease.
+ Thus, the sum begins to decline despite the increasing number of RC elements and the fitted impedance spectrum begins to oscillate (i.e., overfitting takes place).
+ The apex should coincide with or be near the optimum.
+
+ References:
+
+ - V. Yrjänä and J. Bobacka, 2024, Electrochim. Acta, 504, 144951 (https://doi.org/10.1016/j.electacta.2024.144951)
+
+ Parameters
+ ----------
+ tests: List[|KramersKronigResult|]
+ The test results to evaluate.
+
+ lower_limit: int, optional
+ The lower limit to enforce for the number of RC elements.
+ If this value is less than one, then no limit is enforced.
+ If both the lower and upper limit are greater than zero, then the lower limit must have a smaller value than the upper limit.
+
+ upper_limit: int, optional
+ The upper limit to enforce for the number of RC elements.
+ If this value is less than one, then no limit is enforced.
+ If both the lower and upper limit are greater than zero, then the upper limit must have a greater value than the lower limit.
+
+ relative_scores: bool, optional
+ Return relative scores ranging from 0.0 to 1.0 (from worst to best) rather than the raw values.
+
+ Returns
+ -------
+ Dict[int, float]
+ """
+ return _suggest(
+ circuits={t.num_RC: t.circuit for t in tests},
+ lower_limit=lower_limit,
+ upper_limit=upper_limit,
+ relative_scores=relative_scores,
+ )
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/representation.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/representation.py
new file mode 100644
index 0000000..3850040
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/representation.py
@@ -0,0 +1,494 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from numpy import (
+ arctan2,
+ argmax,
+ argmin,
+ complex128,
+ cos,
+ degrees,
+ float64,
+ inf,
+ isnan,
+ linspace,
+ log10 as log,
+ pi,
+ sign,
+ sin,
+ tan,
+)
+from pyimpspec.analysis.kramers_kronig.result import KramersKronigResult
+from pyimpspec.typing import Frequencies
+from pyimpspec.typing.helpers import (
+ Dict,
+ List,
+ Tuple,
+ NDArray,
+ _is_boolean,
+ _is_integer,
+ _is_floating,
+)
+from .utility.osculating_circle import (
+ _get_osculating_circle,
+ _find_x_axis_intersections,
+)
+from .utility.pseudo_chi_squared import _calculate_intercept_of_lines
+
+
+_DEBUG = bool(0)
+
+
+def _extrapolate_spectrum(X_slope: NDArray[complex128], admittance: bool) -> float64:
+ numerator: float64 = (X_slope[0] - X_slope[1]).imag
+ denominator: float64 = (X_slope[0] - X_slope[1]).real
+
+ slope: float64
+ if denominator == 0.0:
+ slope = inf if numerator > 0 else -inf
+ else:
+ slope = numerator / denominator
+
+ slope *= 1 if admittance else -1
+
+ return slope
+
+
+def _does_extrapolated_slope_tend_to_zero(
+ X_slope: NDArray[complex128],
+ high_frequency: bool,
+) -> bool:
+ if high_frequency and abs(X_slope[0].imag) < abs(X_slope[1].imag):
+ return True
+ elif not high_frequency and abs(X_slope[-1].imag) < abs(X_slope[-2].imag):
+ return True
+ elif abs(X_slope[-1].imag) == abs(X_slope[-2].imag) == 0.0:
+ return True
+
+ return False
+
+
+def _pick_intersection(
+ intersections: List[Tuple[float, float]],
+ x_center: float,
+ y_center: float,
+ kappa: float,
+ X_circle: NDArray[complex128],
+ admittance: bool,
+) -> Tuple[float, float, float, float, float, float, float]:
+ x_start: float = X_circle[1].real
+ y_start: float = X_circle[1].imag * (1 if admittance else -1)
+
+ start_angle: float = arctan2(y_start - y_center, x_start - x_center)
+ start_angle += 2 * pi
+ start_angle %= 2 * pi
+
+ rotation_direction: int = int(sign(kappa)) * (-1 if admittance else 1)
+
+ candidates: List[Tuple[float, float, float, float]] = []
+ x_end: float
+ y_end: float
+ for x_end, y_end in intersections:
+ end_angle: float = arctan2(y_end - y_center, x_end - x_center)
+ end_angle += 2 * pi
+ end_angle %= 2 * pi
+
+ angle_difference: float = end_angle - start_angle
+ angle_difference *= rotation_direction
+ if angle_difference < 0.0:
+ angle_difference += 2 * pi
+
+ candidates.append((x_end, y_end, end_angle, angle_difference))
+
+ if _DEBUG:
+ print(
+ f"{degrees(start_angle)=:.3f}, {degrees(end_angle)=:.3f}, {degrees(angle_difference)=:.3f}"
+ )
+
+ candidates.sort(key=lambda t: t[-1])
+ x_end, y_end, end_angle, angle_difference = candidates[0]
+
+ return (
+ start_angle,
+ x_start,
+ y_start,
+ end_angle,
+ x_end,
+ y_end,
+ angle_difference,
+ )
+
+
+def _imaginary_extremes_tend_toward_zero(
+ representations: List[Tuple[KramersKronigResult, Dict[int, float], int, int]]
+) -> List[float]:
+ results: List[float] = []
+
+ i: int
+ test: KramersKronigResult
+ for i, (test, *_) in enumerate(representations):
+ if _DEBUG:
+ from pyimpspec.plot.mpl import show, plot_nyquist
+ from matplotlib.pyplot import Circle
+
+ figure, axes = plot_nyquist(
+ test,
+ colors={"impedance": "black"},
+ admittance=test.admittance,
+ legend=False,
+ )
+ figure, axes = plot_nyquist(
+ test,
+ colors={"impedance": "red"},
+ admittance=test.admittance,
+ line=True,
+ legend=False,
+ figure=figure,
+ axes=axes,
+ )
+ ax = axes[0]
+ x_lim = ax.get_xlim()
+ y_lim = ax.get_ylim()
+ ax.axhline(0.0, color="black", alpha=0.5)
+
+ score: float = 0.0
+
+ f: Frequencies = test.get_frequencies()
+
+ j: int
+ f_circle: NDArray[float64]
+ for j, f_circle in enumerate((f[:3][::-1], f[-3:])):
+ if _DEBUG:
+ color: str = "blue" if j == 0 else "green"
+ print()
+
+ X_circle: NDArray[complex128] = test.circuit.get_impedances(f_circle) ** (
+ -1 if test.admittance else 1
+ )
+
+ x_center: float
+ y_center: float
+ kappa: float
+ x_center, y_center, kappa = _get_osculating_circle(
+ test.circuit,
+ f_circle,
+ test.admittance,
+ )
+
+ rotation_direction: int = int(sign(kappa))
+ if test.admittance:
+ rotation_direction *= -1
+ else:
+ y_center *= -1
+
+ f_slope: List[float] = (
+ [
+ max(f),
+ max(f) * 0.9999,
+ ]
+ if j == 0
+ else [
+ min(f) * 1.0001,
+ min(f),
+ ]
+ )
+ X_slope: NDArray[complex128] = test.circuit.get_impedances(
+ f_slope,
+ ) ** (-1 if test.admittance else 1)
+ slope: float64 = _extrapolate_spectrum(X_slope, test.admittance)
+
+ extrapolated_slope_tends_to_zero: bool = (
+ _does_extrapolated_slope_tend_to_zero(
+ X_slope,
+ high_frequency=j == 0,
+ )
+ )
+ if extrapolated_slope_tends_to_zero:
+ score += 0.5
+
+ if _DEBUG:
+ print(
+ f"{test.admittance=}, {test.num_RC=}, {x_center=}, {y_center=}, {kappa=}, {slope=}"
+ )
+ offset: float = (
+ X_slope[0].imag * (1 if test.admittance else -1)
+ - slope * X_slope[0].real
+ )
+ x_intercept: float = _calculate_intercept_of_lines(
+ slope,
+ offset,
+ 0.0,
+ 0.0,
+ )
+ if extrapolated_slope_tends_to_zero:
+ ax.plot(
+ [X_slope[j].real, x_intercept],
+ [slope * _x + offset for _x in [X_slope[j].real, x_intercept]],
+ color=color,
+ linestyle=":",
+ )
+ ax.scatter(
+ x_intercept,
+ slope * x_intercept + offset,
+ edgecolor=color,
+ facecolor="none",
+ marker="o",
+ )
+ else:
+ ax.axline(
+ (
+ X_slope[j].real,
+ X_slope[j].imag * (1 if test.admittance else -1),
+ ),
+ slope=slope,
+ color=color,
+ linestyle=":",
+ )
+
+ if kappa == 0.0 or isnan(x_center) or isnan(y_center):
+ # Straight line
+ continue
+
+ radius: float = abs(1 / kappa)
+ intersections: List[Tuple[float, float]]
+ intersections = _find_x_axis_intersections(x_center, y_center, radius)
+ if len(intersections) > 0:
+ start_angle: float
+ x_start: float
+ y_start: float
+ end_angle: float
+ x_end: float
+ y_end: float
+ angle_difference: float
+ (
+ start_angle,
+ x_start,
+ y_start,
+ end_angle,
+ x_end,
+ y_end,
+ angle_difference,
+ ) = _pick_intersection(
+ intersections,
+ x_center,
+ y_center,
+ kappa,
+ X_circle,
+ test.admittance,
+ )
+
+ score += 0.5 * (1.0 - angle_difference / (2 * pi))
+
+ if _DEBUG:
+ ax.axline(
+ (x_center, y_center),
+ slope=tan(start_angle),
+ color="magenta",
+ linestyle=":",
+ )
+ ax.scatter(
+ x_start,
+ y_start,
+ facecolor="none",
+ edgecolor="magenta",
+ marker="o",
+ zorder=20,
+ )
+
+ ax.axline(
+ (x_center, y_center),
+ slope=tan(end_angle),
+ color="magenta",
+ linestyle="--",
+ )
+ ax.scatter(
+ x_end,
+ y_end,
+ edgecolor="none",
+ facecolor="magenta",
+ marker="o",
+ zorder=20,
+ )
+
+ angles = linspace(
+ start_angle,
+ start_angle + angle_difference * rotation_direction,
+ num=360,
+ )
+ radius_factor = 0.9
+ x = radius * radius_factor * cos(angles) + x_center
+ y = radius * radius_factor * sin(angles) + y_center
+ ax.plot(x, y, color="magenta", linestyle=":")
+
+ if _DEBUG:
+ ax.add_patch(
+ Circle(
+ (x_center, y_center),
+ radius,
+ edgecolor=color,
+ facecolor="none",
+ linestyle="--",
+ )
+ )
+ ax.scatter(x_center, y_center, marker="+", color=color)
+
+ print(f"{intersections=}")
+ for x, y in intersections:
+ ax.scatter(x, y, color=color, marker="x")
+
+ results.append(score)
+
+ if _DEBUG:
+ print(f"{results[i]=}, {representations[i][0].admittance=}")
+ ax.set_xlim(*x_lim)
+ ax.set_ylim(*y_lim)
+ show()
+
+ return results
+
+
+def _suggest_representation(
+ representations: List[Tuple[KramersKronigResult, Dict[int, float], int, int]]
+) -> Tuple[KramersKronigResult, Dict[int, float], int, int]:
+ if not (len(representations) == 2):
+ raise ValueError(
+ f"Expected a list with two tuples instead of {representations=}"
+ )
+
+ representations = sorted(representations, key=lambda t: t[0].pseudo_chisqr)
+
+ log_pseudo_chisqr: NDArray[float64] = log(
+ [t[0].pseudo_chisqr for t in representations]
+ )
+ if abs(log_pseudo_chisqr[1] - log_pseudo_chisqr[0]) > 0.5:
+ if _DEBUG:
+ i: int
+ for i, (test, *_) in enumerate(representations):
+ print(f"{test.admittance=}, {test.num_RC=}, {log_pseudo_chisqr[i]=}")
+
+ return representations[0]
+
+ scores: Dict[int, float] = {i: 0 for i in range(0, len(representations))}
+
+ if _DEBUG:
+ admittances = {i: representations[i][0].admittance for i in scores.keys()}
+
+ num_RCs: List[float] = [t[0].num_RC + t[0].pseudo_chisqr for t in representations]
+ i = argmin(num_RCs)
+ scores[i] += 1
+ if _DEBUG:
+ print(f"{scores=}, {admittances=}, {num_RCs=}")
+
+ lower_limits: List[float] = [t[2] + t[0].pseudo_chisqr for t in representations]
+ i = argmin(lower_limits)
+ scores[i] += 1
+ if _DEBUG:
+ print(f"{scores=}, {admittances=}, {lower_limits=}")
+
+ imaginary_extremes_tend_to_zero: List[float] = _imaginary_extremes_tend_toward_zero(
+ representations
+ )
+ i = argmax(imaginary_extremes_tend_to_zero)
+ scores[i] += 1
+ if _DEBUG:
+ print(f"{scores=}, {admittances=}, {imaginary_extremes_tend_to_zero=}")
+
+ score: float
+ for i, score in sorted(
+ scores.items(),
+ key=lambda kv: (
+ max(scores.values()) - kv[1],
+ representations[kv[0]][0].pseudo_chisqr,
+ ),
+ ):
+ return representations[i]
+
+ return representations[0]
+
+
+def suggest(
+ suggestions: List[Tuple[KramersKronigResult, Dict[int, float], int, int]]
+) -> Tuple[KramersKronigResult, Dict[int, float], int, int]:
+ """
+ Suggest the most appropriate representation (impedance or admittance) of the immittance spectrum that was tested.
+ If the difference between |pseudo chi-squared| values is greater than 0.5 decades, then the representation that provides the best fit is picked.
+ Otherwise, the representations are scored according to various criteria:
+
+ - One point to whichever has the lowest number of RC elements.
+ - One point to whichever has the lowest lower limit for the number of RC elements.
+ - One point to whichever comes closest to having the imaginary part of each frequency extreme reach zero.
+
+ The tuple in the input list that corresponds to the representation with the highest score is returned.
+
+ References:
+
+ - V. Yrjänä and J. Bobacka, 2024, Electrochim. Acta, 504, 144951 (https://doi.org/10.1016/j.electacta.2024.144951)
+
+ Parameters
+ ----------
+ suggestions: List[Tuple[KramersKronigResult, Dict[int, float], int, int]]
+ A list obtained by calling |suggest_num_RC| multiple times and collecting the return values in a list.
+
+ Returns
+ -------
+ Tuple[KramersKronigResult, Dict[int, float], int, int]
+ """
+ if not isinstance(suggestions, list):
+ raise TypeError(f"Expected a list instead of {suggestions=}")
+ elif len(suggestions) < 1:
+ raise ValueError(f"Expected at least one item in {suggestions=}")
+ elif not all(map(lambda t: isinstance(t, tuple) and len(t) == 4, suggestions)):
+ raise TypeError(
+ f"Expected a list of tuples with four items instead of {suggestions=}"
+ )
+
+ for suggestion in suggestions:
+ test: KramersKronigResult
+ scores: Dict[int, float]
+ lower_limit: int
+ upper_limit: int
+ test, scores, lower_limit, upper_limit = suggestion
+
+ if not _is_boolean(test.admittance):
+ raise TypeError(f"Expected a boolean instead of {test.admittance=}")
+ elif not isinstance(scores, dict):
+ raise TypeError(
+ f"Expected the second item in each tuple to be a dictionary instead of {suggestion=}"
+ )
+ elif not all(map(lambda k: _is_integer(k), scores.keys())):
+ raise TypeError(
+ f"Expected only integer keys in the second item of the tuple instead of {scores=}"
+ )
+ elif not all(map(lambda v: _is_floating(v), scores.values())):
+ raise TypeError(
+ f"Expected only float values in the second item of the tuple instead of {scores=}"
+ )
+ elif not _is_integer(lower_limit):
+ raise TypeError(
+ f"Expected the third item in each tuple to be an integer instead of {suggestion=}"
+ )
+ elif not _is_integer(upper_limit):
+ raise TypeError(
+ f"Expected the fourth item in each tuple to be an integer instead of {suggestion=}"
+ )
+
+ if len(suggestions) == 1:
+ return suggestions[0]
+
+ return _suggest_representation(suggestions)
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/__init__.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/__init__.py
new file mode 100644
index 0000000..0132a0d
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/__init__.py
@@ -0,0 +1,21 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from .osculating_circle import calculate_curvatures
+from .common import subdivide_frequencies
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/common.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/common.py
new file mode 100644
index 0000000..f43a5d5
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/common.py
@@ -0,0 +1,129 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from numpy import (
+ array,
+ log10 as log,
+ logspace,
+)
+from pyimpspec.analysis.kramers_kronig.result import KramersKronigResult
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.circuit.kramers_kronig import KramersKronigAdmittanceRC
+from pyimpspec.typing.aliases import (
+ Frequencies,
+ Frequency,
+)
+from pyimpspec.typing.helpers import (
+ Dict,
+ List,
+)
+
+
+def _truncate_circuits(
+ circuits: Dict[int, Circuit],
+ lower_limit: int,
+ upper_limit: int,
+) -> Dict[int, Circuit]:
+ valid_keys: List[int]
+
+ if lower_limit > 0 and upper_limit > 0:
+ valid_keys = [
+ num_RC for num_RC in circuits.keys() if lower_limit <= num_RC <= upper_limit
+ ]
+ elif upper_limit > 0:
+ valid_keys = [num_RC for num_RC in circuits.keys() if num_RC <= upper_limit]
+ elif lower_limit > 0:
+ valid_keys = [num_RC for num_RC in circuits.keys() if lower_limit <= num_RC]
+ else:
+ valid_keys = list(circuits.keys())
+
+ if len(valid_keys) == 0:
+ raise ValueError(
+ f"The specified limits mean that there are no valid number of RC elements to use: {lower_limit=}, {upper_limit=}"
+ )
+
+ return {num_RC: circuits[num_RC] for num_RC in valid_keys}
+
+
def _is_admittance_test_circuit(circuit: Circuit) -> bool:
    # A Kramers-Kronig test performed on the admittance representation uses
    # KramersKronigAdmittanceRC elements, so the presence of even one such
    # element identifies the circuit as an admittance-type test circuit.
    for element in circuit.get_elements(recursive=True):
        if isinstance(element, KramersKronigAdmittanceRC):
            return True

    return False
+
+
def subdivide_frequencies(
    frequencies: Frequencies,
    subdivision: int = 4,
) -> Frequencies:
    """
    Insert additional frequencies between each pair of adjacent frequencies.

    The inserted frequencies are spaced logarithmically between the two
    original frequencies on either side.

    Parameters
    ----------
    frequencies: Frequencies
        The original frequencies that are to be subdivided.

    subdivision: int, optional
        The number of frequencies added between each of the original frequencies.

    Returns
    -------
    Frequencies
    """
    if subdivision < 1:
        raise ValueError(f"Expected {subdivision=} > 0")

    result: List[float] = [frequencies[0]]

    i: int
    for i in range(len(frequencies) - 1):
        # logspace includes both endpoints; the first one is dropped since it
        # is already present in the result.
        segment = logspace(
            log(frequencies[i]),
            log(frequencies[i + 1]),
            subdivision + 2,
        )
        result.extend(segment.tolist()[1:])

    # Sanity check: n originals plus subdivision extras per gap.
    m: int = len(result)
    n: int = len(frequencies)
    if not (m == n + (n - 1) * subdivision):
        raise ValueError(f"Expected {m=} == {n + (n - 1) * subdivision=}")

    return array(result)
+
+
+def _generate_pseudo_chisqr_offsets(
+ pseudo_chisqrs: Dict[int, float],
+ factor: float,
+) -> Dict[int, float]:
+ if len(pseudo_chisqrs) == 1 or factor == 0.0:
+ return {num_RC: 0.0 for num_RC, pseudo_chisqr in pseudo_chisqrs.items()}
+
+ minimum: float = min(pseudo_chisqrs.values())
+ maximum: float = max(pseudo_chisqrs.values())
+
+ return {
+ num_RC: ((pseudo_chisqr - minimum) / (maximum - minimum)) * factor
+ for num_RC, pseudo_chisqr in pseudo_chisqrs.items()
+ }
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/cubic.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/cubic.py
new file mode 100644
index 0000000..745c3ec
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/cubic.py
@@ -0,0 +1,122 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from typing import (
+ Optional,
+ Tuple,
+)
+from warnings import (
+ catch_warnings,
+ filterwarnings,
+)
+from numpy import (
+ float64,
+ int64,
+ ndarray,
+)
+from numpy.random import normal
+from numpy.typing import NDArray
+
+
+def _cubic_function(x, a: float, b: float, c: float, d: float):
+ return a * x**3 + b * x**2 + c * x + d
+
+
def _cubicish_function(x, a: float, b: float, c: float, d: float):
    """
    Evaluate a 'cubicish' curve: a cubic polynomial that, when the leading
    coefficient is positive, continues as its tangent line from the
    inflection point onward.
    """
    y = _cubic_function(x, a, b, c, d)
    if not (a > 0.0):
        return y

    # Inflection point: where the second derivative (6*a*x + 2*b) is zero.
    x_infl: float = (-2 * b) / (6 * a)
    # Tangent line at the inflection point (first derivative there).
    tangent_slope: float = 3 * a * x_infl**2 + 2 * b * x_infl + c
    tangent_offset: float = (
        _cubic_function(x_infl, a, b, c, d) - tangent_slope * x_infl
    )

    if isinstance(x, ndarray):
        # Replace everything from the sample closest to the inflection
        # point onward with the tangent line.
        split: int = abs(x - x_infl).argmin()
        y[split:] = tangent_slope * x[split:] + tangent_offset
    elif x >= x_infl:
        y = tangent_slope * x + tangent_offset

    return y
+
+
def _fit_cubic_function(
    x: NDArray[int64],
    y: NDArray[float64],
    p0: Optional[Tuple[float64, ...]] = None,
) -> Tuple[float64, ...]:
    """
    Fit the cubic model |_cubic_function| to the provided points.

    Up to ten attempts are made; each attempt uses a fresh random initial
    guess unless an explicit ``p0`` is provided. Raises ValueError if no
    attempt converges.
    """
    from scipy.optimize import (
        OptimizeWarning,
        curve_fit,
    )

    attempt: int
    for attempt in range(10):
        with catch_warnings():
            filterwarnings("ignore", category=RuntimeWarning)
            filterwarnings("ignore", category=OptimizeWarning)

            try:
                popt: NDArray[float64] = curve_fit(
                    _cubic_function,
                    xdata=x,
                    ydata=y,
                    p0=normal(0.0, 1.0, 4) if p0 is None else p0,
                    maxfev=100000,
                )[0]
            except RuntimeError:
                # Convergence failure: retry (with a new random guess when
                # p0 was not provided).
                continue
            except TypeError:
                # Unrecoverable input problem (e.g., too few data points).
                break

            return tuple(popt)

    raise ValueError("Failed to fit cubic function!")
+
+
def _fit_cubicish_function(
    x: NDArray[int64],
    y: NDArray[float64],
    p0: Optional[Tuple[float64, ...]] = None,
) -> Tuple[float64, ...]:
    """
    Fit |_cubicish_function| (a cubic that continues as its tangent line
    past the inflection point) to the provided points.

    Up to ten attempts are made; each attempt uses a random initial guess
    unless an explicit ``p0`` is provided.

    Parameters
    ----------
    x: NDArray[int64]
        The x-coordinates.

    y: NDArray[float64]
        The y-coordinates.

    p0: Optional[Tuple[float64, ...]], optional
        An optional initial guess for the parameters (a, b, c, d).

    Returns
    -------
    Tuple[float64, ...]
        The fitted parameters (a, b, c, d).
    """
    from scipy.optimize import (
        OptimizeWarning,
        curve_fit,
    )

    for _ in range(10):
        with catch_warnings():
            filterwarnings("ignore", category=RuntimeWarning)
            filterwarnings("ignore", category=OptimizeWarning)
            try:
                p: NDArray[float64] = curve_fit(
                    _cubicish_function,
                    xdata=x,
                    ydata=y,
                    p0=normal(0.0, 1.0, 4) if p0 is None else p0,
                    maxfev=100000,
                )[0]
            except RuntimeError:
                # Convergence failure: retry with a new random guess.
                continue
            except TypeError:
                # Unrecoverable input problem (e.g., too few data points).
                break

        return tuple(p)

    # NOTE(review): the message says 'cubic' although this fits the
    # 'cubicish' model - possibly a copy-paste leftover; confirm intent.
    raise ValueError("Failed to fit cubic function!")
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/logistic.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/logistic.py
new file mode 100644
index 0000000..f3d5ac5
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/logistic.py
@@ -0,0 +1,28 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from numpy import exp
+
+
+def _logistic_function(x, a: float, b: float, c: float, d: float):
+ return a / (1 + exp(-b * (x - c))) + d
+
+
+def _logistic_derivative(x, a: float, b: float, c: float, d: float):
+ return a * b * exp(-b * (x - c)) / (exp(-b * (x - c)) + 1) ** 2
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/mu.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/mu.py
new file mode 100644
index 0000000..ca5b6d3
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/mu.py
@@ -0,0 +1,79 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from typing import Dict
+from numpy import nan
+
+from pyimpspec.circuit.base import Element
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.circuit.kramers_kronig import (
+ KramersKronigAdmittanceRC,
+ KramersKronigRC,
+)
+from .common import _is_admittance_test_circuit
+
+
def _calculate_mu(circuit: Circuit) -> float:
    r"""
    Calculate :math:`\mu \in [0.0, 1.0]` for a |KramersKronigResult|.
    Over- and underfitting are represented by 0.0 and 1.0, respectively.
    Based on Eq. 21 on page 25 of Schönleber et al. (2014):

    :math:`\mu = 1 - \frac{\Sigma_{R_k < 0} |R_k|}{\Sigma_{R_k \geq 0} |R_k|}`

    with some modifications:

    - The return value is clamped to the range [0.0, 1.0] (see the point below for an exception).
    - If the denominator is zero, then |numpy.nan| is returned.
    - If the test was performed on admittance data, then :math:`C_k` is substituted for :math:`R_k`.

    References:

    - M. Schönleber, D. Klotz, and E. Ivers-Tiffée, 2014, Electrochim. Acta, 131, 20-27 (https://doi.org/10.1016/j.electacta.2014.01.034)

    Parameters
    ----------
    circuit: |Circuit|
        The circuit that was fitted as part of the Kramers-Kronig test.

    Returns
    -------
    float
    """
    # R applies to impedance test circuits, C to admittance test circuits.
    key: str = "C" if _is_admittance_test_circuit(circuit) else "R"

    negative_mass: float = 0.0
    positive_mass: float = 0.0

    element: Element
    for element in circuit.get_elements(recursive=True):
        # Only the Kramers-Kronig RC elements contribute; an exact type
        # check is used on purpose (subclasses are excluded).
        if type(element) not in (KramersKronigAdmittanceRC, KramersKronigRC):
            continue

        value: float = element.get_values()[key]
        if value < 0.0:
            negative_mass += abs(value)
        else:
            positive_mass += value

    if positive_mass == 0.0:
        return nan

    # Clamp to [0.0, 1.0].
    return min((1.0, max((0.0, 1.0 - negative_mass / positive_mass))))
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/osculating_circle.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/osculating_circle.py
new file mode 100644
index 0000000..51dbe87
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/osculating_circle.py
@@ -0,0 +1,269 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from warnings import (
+ catch_warnings,
+ filterwarnings,
+)
+from numpy import (
+ arccos,
+ argmin,
+ argwhere,
+ array,
+ complex128,
+ dot,
+ float64,
+ int64,
+ nan,
+ ones,
+ sign,
+ sin,
+ sqrt,
+ zeros,
+)
+from numpy.linalg import (
+ det,
+ norm,
+)
+from numpy.typing import NDArray
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.typing.aliases import (
+ ComplexImpedance,
+ ComplexImpedances,
+)
+from pyimpspec.typing.helpers import (
+ List,
+ Tuple,
+)
+
+
def _find_x_axis_intersections(
    x_center: float,
    y_center: float,
    radius: float,
) -> List[Tuple[float, float]]:
    """
    Find the points where a circle intersects the x-axis.

    Uses sympy's exact geometry primitives and returns zero, one (tangent),
    or two intersection points as (x, y) tuples.
    """
    from sympy.geometry import (
        Circle,
        Line,
        Point,
    )

    center = Point(x_center, y_center)
    circle = Circle(center, radius)
    # A horizontal line through the origin represents the x-axis.
    x_axis = Line(Point(0.0, 0.0), slope=0.0)

    return [(float(point.x), float(point.y)) for point in circle.intersection(x_axis)]
+
+
def _get_osculating_circle(
    circuit: Circuit,
    f: List[float],
    admittance: bool,
) -> Tuple[float, float, float]:
    """
    Determine the osculating circle of the fitted circuit's spectrum at the
    middle of three frequencies.

    Parameters
    ----------
    circuit: Circuit
        The fitted Kramers-Kronig test circuit.

    f: List[float]
        Exactly three frequencies; the circle is evaluated at the middle one.

    admittance: bool
        If True, work in the admittance representation (Z**-1).

    Returns
    -------
    Tuple[float, float, float]
        The (x, y) coordinates of the circle's center and the signed
        curvature. (nan, nan, 0.0) is returned when the curvature is zero.
    """
    if not (len(f) == 3):
        raise ValueError(f"Expected a list with three items instead of {f=}")

    # Impedance (or admittance) at the three frequencies.
    X_circle: NDArray[complex128] = circuit.get_impedances(f) ** (
        -1 if admittance else 1
    )
    kappa: float64 = _fit_osculating_circle(*X_circle)
    if kappa == 0.0:
        # Collinear points: no finite osculating circle exists.
        return (nan, nan, 0.0)

    radius: float64 = abs(1 / kappa)

    # Estimate the tangent at the middle frequency from two nearby points.
    X_tangent: NDArray[complex128] = circuit.get_impedances(
        [f[1] * (1 + 1e-6), f[1] * (1 - 1e-6)]
    ) ** (-1 if admittance else 1)
    slope: float64 = (X_tangent[1] - X_tangent[0]).imag / (
        X_tangent[1] - X_tangent[0]
    ).real
    # NOTE(review): a horizontal tangent (slope == 0.0) would raise a
    # division error here - presumably not encountered in practice; verify.
    normal: float64 = -1 / slope

    # Step one radius away from the middle point along the normal; the
    # center lies on one of the two sides.
    dx: float64 = radius / sqrt(1 + normal**2)
    dy: float64 = normal * dx

    x_center: float64 = X_circle[1].real
    y_center: float64 = X_circle[1].imag

    centers: NDArray[complex128] = zeros(2, dtype=complex128)
    centers[0] = complex(x_center + dx, y_center + dy)
    centers[1] = complex(x_center - dx, y_center - dy)

    vectors: NDArray[complex128] = zeros((2, 3), dtype=complex128)
    distances: NDArray[float64] = zeros((2, 3), dtype=float64)

    for i in range(0, 2):
        for j in range(0, 3):
            vectors[i][j] = X_circle[j] - centers[i]
            distances[i][j] = abs(vectors[i][j])

    # Pick the candidate center whose distances to the three points best
    # match the radius.
    sums_of_distances: List[float] = [abs(sum(row) - radius * 3) for row in distances]
    i: int = argmin(sums_of_distances)

    x_center, y_center = centers[i].real, centers[i].imag

    return (x_center, y_center, kappa)
+
+
+def _fit_osculating_circle(
+ Z_i: ComplexImpedance,
+ Z_j: ComplexImpedance,
+ Z_k: ComplexImpedance,
+) -> float64:
+ # The initial slower implementation that is still used as part of
+ # suggesting a representation via the _get_osculating_circle function.
+ # This implementation is also used to verify the correctness of the faster
+ # implementation (i.e., the calculate_curvatures function).
+ a: NDArray[float64] = array([(Z_j - Z_i).real, (Z_j - Z_i).imag])
+ b: NDArray[float64] = array([(Z_k - Z_j).real, (Z_k - Z_j).imag])
+ c: NDArray[float64] = array([(Z_k - Z_i).real, (Z_k - Z_i).imag])
+
+ a_dot_b: float64 = dot(a, b)
+ a_norm: float64 = norm(a)
+ b_norm: float64 = norm(b)
+ a_norm_dot_b_norm: float64 = dot(a_norm, b_norm)
+ cos_alpha: float64 = a_dot_b / a_norm_dot_b_norm
+
+ # To handle potential floating point inaccuracy
+ # that could cause issues when cos_alpha is provided as
+ # input to numpy.arccos
+ if cos_alpha < -1.0:
+ cos_alpha = -1.0
+ elif cos_alpha > 1.0:
+ cos_alpha = 1.0
+ try:
+ with catch_warnings():
+ filterwarnings("error", category=RuntimeWarning)
+ alpha: float64 = arccos(cos_alpha)
+ except RuntimeWarning as e:
+ print(a_dot_b, a_norm, b_norm, a_norm_dot_b_norm)
+ raise e
+
+ abs_kappa: float64 = 2 * sin(alpha) / norm(c)
+
+ # Determine the direction of the curvature using the sign of the following
+ # determinant (<0 means clockwise, >0 means counter-clockwise in a
+ # typical Nyquist plot where -Z" is plotted versus Z').
+ matrix: NDArray[float64] = ones((3, 3), dtype=float64)
+ matrix[:, 0] = [Z_i.real, Z_j.real, Z_k.real]
+ matrix[:, 1] = [-Z_i.imag, -Z_j.imag, -Z_k.imag]
+
+ return abs_kappa * sign(det(matrix))
+
+
def calculate_curvatures(Z: ComplexImpedances) -> NDArray[float64]:
    """
    Estimate the curvatures of an impedance spectrum.

    For each interior point, the signed curvature of the osculating circle
    through the point and its two neighbors is calculated (negative values
    correspond to clockwise motion in a Nyquist plot where -Z" is plotted
    versus Z').

    References:

    - C. Plank, T. Rüther, and M.A. Danzer, 2022, 2022 International Workshop on Impedance Spectroscopy (IWIS), 1-6, (https://doi.org/10.1109/IWIS57888.2022.9975131)
    - V. Yrjänä and J. Bobacka, 2024, Electrochim. Acta, 504, 144951 (https://doi.org/10.1016/j.electacta.2024.144951)

    Parameters
    ----------
    Z: ComplexImpedances
        The impedances to use when estimating the curvatures using osculating circles.

    Returns
    -------
    NDArray[float64]
        An array of ``Z.size - 2`` signed curvatures.
    """
    # Chord vectors: a spans (i-1 -> i), b spans (i -> i+1), and c spans
    # (i-1 -> i+1) for each interior point i.
    a: NDArray[float64] = zeros((Z.size - 2, 2), dtype=float64)
    a[:, 0] = (Z[1:-1] - Z[:-2]).real
    a[:, 1] = (Z[1:-1] - Z[:-2]).imag

    b: NDArray[float64] = zeros((Z.size - 2, 2), dtype=float64)
    b[:, 0] = (Z[2:] - Z[1:-1]).real
    b[:, 1] = (Z[2:] - Z[1:-1]).imag

    c: NDArray[float64] = zeros((Z.size - 2, 2), dtype=float64)
    c[:, 0] = (Z[2:] - Z[:-2]).real
    c[:, 1] = (Z[2:] - Z[:-2]).imag

    # Row-wise dot products and norms, vectorized instead of the previous
    # per-row Python loop. (The previous implementation also allocated an
    # unused, wrongly-shaped kappa array here.)
    a_dot_b: NDArray[float64] = (a * b).sum(axis=1)
    a_norm: NDArray[float64] = sqrt((a * a).sum(axis=1))
    b_norm: NDArray[float64] = sqrt((b * b).sum(axis=1))
    c_norm: NDArray[float64] = sqrt((c * c).sum(axis=1))
    a_norm_dot_b_norm: NDArray[float64] = a_norm * b_norm
    cos_alpha: NDArray[float64] = a_dot_b / a_norm_dot_b_norm

    # Clamp to the domain of arccos to handle potential floating-point
    # inaccuracy.
    cos_alpha[cos_alpha < -1.0] = -1.0
    cos_alpha[cos_alpha > 1.0] = 1.0

    try:
        with catch_warnings():
            filterwarnings("error", category=RuntimeWarning)
            alpha: NDArray[float64] = arccos(cos_alpha)
    except RuntimeWarning as e:
        print(a_dot_b, a_norm, b_norm, a_norm_dot_b_norm)
        raise e

    # Law of sines: |kappa| = 2*sin(alpha)/|c| is the inverse of the
    # circumradius of the three points.
    abs_kappa: NDArray[float64] = 2 * sin(alpha) / c_norm

    # Determine the direction of the curvature using the sign of the
    # determinant (<0 means clockwise, >0 means counter-clockwise in a
    # typical Nyquist plot where -Z" is plotted versus Z').
    matrix: NDArray[float64] = ones((Z.size, 3), dtype=float64)
    matrix[:, 0] = Z.real
    matrix[:, 1] = -Z.imag

    determinant: NDArray[float64] = zeros(Z.size - 2, dtype=float64)
    i: int
    for i in range(0, Z.size - 2):
        determinant[i] = det(matrix[i:i + 3, :])

    return abs_kappa * sign(determinant)
+
+
+def _calculate_sign_change_distances(kappas: NDArray[float64]) -> NDArray[int64]:
+ distances: List[int] = []
+ previous_sign: int = sign(kappas[0])
+ previous_sign_index: int = 0
+
+ i: int
+ for i in range(1, len(kappas)):
+ if kappas[i] == 0.0:
+ continue
+
+ current_sign: int = sign(kappas[i])
+ if current_sign != previous_sign:
+ distances.append(i - previous_sign_index)
+ previous_sign = current_sign
+ previous_sign_index = i
+
+ if len(distances) == 0:
+ distances.append(i)
+
+ return array(distances)
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/pseudo_chi_squared.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/pseudo_chi_squared.py
new file mode 100644
index 0000000..229efab
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/pseudo_chi_squared.py
@@ -0,0 +1,221 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from warnings import (
+ catch_warnings,
+ filterwarnings,
+)
+from numpy import (
+ array,
+ ceil,
+ float64,
+ log10 as log,
+ nan,
+)
+from pyimpspec.typing.helpers import (
+ Dict,
+ List,
+ NDArray,
+ Tuple,
+ Union,
+ floating,
+)
+
+
+def _ideal_model_function(
+ x: NDArray[floating],
+ a: floating,
+ b: floating,
+ c: floating,
+) -> NDArray[floating]:
+ return -a * log(10**-x + 10**-b) + c
+
+
def _ideal_model_residual(
    p: "Parameters",  # noqa: F821
    x: NDArray[floating],
    y: NDArray[floating],
) -> NDArray[floating]:
    """Residuals of |_ideal_model_function| relative to the observed values."""
    predicted = _ideal_model_function(x, **p.valuesdict())

    return y - predicted
+
+
def _fit_ideal_model(
    x: NDArray[floating],
    y: NDArray[floating],
) -> Tuple[floating, floating, floating]:
    """
    Fit |_ideal_model_function| to log10(pseudo chi-squared) versus the
    number of RC elements.

    Parameters
    ----------
    x: NDArray[floating]
        The x-coordinates (numbers of RC elements).

    y: NDArray[floating]
        The y-coordinates (log10 of the pseudo chi-squared values).

    Returns
    -------
    Tuple[floating, floating, floating]
        The fitted parameters (a, b, c).
    """
    from lmfit import (
        Parameters,
        minimize,
    )
    from lmfit.minimizer import MinimizerResult

    if len(x) != len(y):
        raise ValueError("Expected the same number of x and y points!")
    elif len(x) < 3:
        raise ValueError("Expected at least three data points!")

    # Initial guesses derived from the slope through the first half of the
    # data and the midpoint.
    i: int = max((1, len(y) // 2))
    a = (y[i] - y[0]) / (x[i] - x[0])
    b = x[i]
    c = y[i] + a * log(10 ** -x[i] + 10**-b)

    parameters = Parameters()
    parameters.add("a", value=a)
    parameters.add("b", value=b, min=1, max=max(x))
    parameters.add("c", value=c)

    fit: MinimizerResult = minimize(
        _ideal_model_residual,
        parameters,
        method="leastsq",
        args=(x, y),
    )
    p: Dict[str, floating] = fit.params.valuesdict()

    return (
        p["a"],
        p["b"],
        p["c"],
    )
+
+
+def _calculate_intercept_of_lines(
+ s1: floating,
+ o1: floating,
+ s2: floating,
+ o2: floating,
+) -> floating:
+ if s1 - s2 == 0.0:
+ raise ZeroDivisionError()
+
+ return (o2 - o1) / (s1 - s2)
+
+
def _intersecting_lines_function(
    x: NDArray[floating],
    s1: floating,
    o1: floating,
    s2: floating,
    o2: floating,
) -> Union[floating, NDArray[floating]]:
    """
    Evaluate a piecewise-linear function: the first line to the left of the
    intersection point and the second line from there onward.
    """
    intercept = _calculate_intercept_of_lines(s1, o1, s2, o2)
    first_line = s1 * x + o1
    second_line = s2 * x + o2

    return array(
        [
            first_line[i] if value < intercept else second_line[i]
            for i, value in enumerate(x)
        ]
    )
+
+
def _fit_intersecting_lines(
    x: NDArray[floating],
    y: NDArray[floating],
) -> Tuple[floating, floating, floating, floating]:
    """
    Fit |_intersecting_lines_function| (two lines meeting at one point) to
    the provided data.

    Parameters
    ----------
    x: NDArray[floating]
        The x-coordinates.

    y: NDArray[floating]
        The y-coordinates.

    Returns
    -------
    Tuple[floating, floating, floating, floating]
        The fitted parameters (slope 1, offset 1, slope 2, offset 2).
    """
    from scipy.optimize import (
        OptimizeWarning,
        curve_fit,
    )

    # Initial guesses: one line through the first half of the data and one
    # through the second half.
    i: int = len(x) // 2
    s1: floating = (y[i] - y[0]) / (x[i] - x[0])
    o1: floating = y[0] - s1 * x[0]
    s2: floating = (y[-1] - y[i]) / (x[-1] - x[i])
    o2: floating = y[-1] - s2 * x[-1]

    # NOTE(review): every retry reuses the same deterministic p0, so repeated
    # RuntimeError attempts are unlikely to fare differently - confirm intent.
    for _ in range(10):
        with catch_warnings():
            filterwarnings("ignore", category=RuntimeWarning)
            filterwarnings("ignore", category=OptimizeWarning)
            try:
                p: NDArray[floating] = curve_fit(
                    _intersecting_lines_function,
                    xdata=x,
                    ydata=y,
                    p0=(s1, o1, s2, o2),
                    maxfev=1000,
                )[0]
            except RuntimeError:
                # Convergence failure: retry.
                continue
            except TypeError:
                # Unrecoverable input problem (e.g., too few data points).
                break

        return tuple(p)

    raise ValueError("Failed to fit the intersecting lines function!")
+
+
def _approximate_transition_and_end_point(
    num_RCs: NDArray[float64],
    log_pseudo_chisqrs: NDArray[float64],
) -> Tuple[int, int, Tuple[float, float, float, float]]:
    """
    Estimate the lower and upper limits for the optimal number of RC
    elements from the shape of the log10(pseudo chi-squared) versus num_RC
    curve.

    Parameters
    ----------
    num_RCs: NDArray[float64]
        The numbers of RC elements (assumed ascending - TODO confirm
        against callers).

    log_pseudo_chisqrs: NDArray[float64]
        The base-10 logarithms of the corresponding pseudo chi-squared
        values.

    Returns
    -------
    Tuple[int, int, Tuple[float, float, float, float]]
        The approximate transition point (lower limit), the maximum number
        of RC elements (upper limit), and the fitted intersecting-lines
        parameters ((nan, nan, nan, nan) when the fit failed).
    """
    # First try to determine the maximum num_RC. If the log(X²ps) vs num_RC
    # plot exhibits numerical instability, which is common for tests like
    # 'complex-inv', 'real', or 'real-inv', then that will hopefully be
    # handled appropriately.
    maximum_distance: int = 10
    lowest_value: float64 = 0.0
    lowest_index: int = -1

    i: int = 0
    y: float64 = log_pseudo_chisqrs[i]
    for i, y in enumerate(log_pseudo_chisqrs):
        if lowest_index < 0 or y <= lowest_value:
            lowest_index = i
            lowest_value = y
        elif (i - lowest_index) >= maximum_distance:
            # No new minimum within the last maximum_distance points:
            # treat the curve as having gone unstable.
            break

    # Back up until the value is within 0.5 decades of the minimum.
    while (y - lowest_value) > 0.5:
        i -= 1
        y = log_pseudo_chisqrs[i]

    # Some test implementations may have a very significant drop in
    # X²ps at high num_RC when the number of points per decade is low.
    while (log_pseudo_chisqrs[i-1] - log_pseudo_chisqrs[i]) > 1.0:
        i -= 1

    max_num_RC: int = int(max(num_RCs[:i]))

    # Try to determine the point where the log(X²ps) vs num_RC plot
    # transitions from a rapid decrease in error to a more gradual
    # rate of decrease in error. This point can be considered the lower
    # limit for the optimal num_RC.
    try:
        p: Tuple[float, float, float, float] = _fit_intersecting_lines(
            num_RCs[:i],
            log_pseudo_chisqrs[:i],
        )
    except ValueError:
        # Fall back to the widest possible range when the fit fails.
        return (
            min(num_RCs),
            max_num_RC,
            (nan, nan, nan, nan),
        )

    intercept: int = int(ceil(_calculate_intercept_of_lines(*p)))

    return (
        intercept,
        max_num_RC,
        p,
    )
diff --git a/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/tau_var_sum.py b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/tau_var_sum.py
new file mode 100644
index 0000000..351b70e
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/algorithms/utility/tau_var_sum.py
@@ -0,0 +1,46 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from typing import Dict
+from numpy import (
+ float64,
+ log10 as log,
+)
+from pyimpspec.circuit.base import Element
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.circuit.kramers_kronig import (
+ KramersKronigAdmittanceRC,
+ KramersKronigRC,
+)
+from .common import _is_admittance_test_circuit
+
+
def _calculate_log_sum_abs_tau_var(circuit: Circuit) -> float64:
    """
    Calculate log10 of the sum of abs(tau/R) (or abs(tau/C) for admittance
    test circuits) over the circuit's Kramers-Kronig RC elements.

    Parameters
    ----------
    circuit: Circuit
        The fitted Kramers-Kronig test circuit.

    Returns
    -------
    float64
    """
    # R applies to impedance test circuits, C to admittance test circuits.
    key: str = "C" if _is_admittance_test_circuit(circuit) else "R"
    total: float = 0.0

    element: Element
    # NOTE(review): unlike _calculate_mu, this iterates without
    # recursive=True - confirm whether nested elements should be included.
    for element in circuit.get_elements():
        if type(element) not in (KramersKronigRC, KramersKronigAdmittanceRC):
            continue

        parameters: Dict[str, float] = element.get_values()
        total += abs(parameters["tau"] / parameters[key])

    return log(total)
diff --git a/src/pyimpspec/analysis/kramers_kronig/cnls.py b/src/pyimpspec/analysis/kramers_kronig/cnls.py
new file mode 100644
index 0000000..b09f199
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/cnls.py
@@ -0,0 +1,163 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from typing import (
+ Dict,
+ Tuple,
+)
+from numpy import (
+ array,
+ complex128,
+ float64,
+ pi,
+)
+from pyimpspec.analysis.fitting import (
+ _from_lmfit,
+ _to_lmfit,
+)
+from numpy.typing import NDArray
+from pyimpspec.circuit.base import Element
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.typing import (
+ ComplexImpedances,
+ Frequencies,
+)
+from pyimpspec.typing.helpers import (
+ _is_boolean,
+ _is_complex_array,
+ _is_floating,
+ _is_floating_array,
+ _is_integer,
+)
+from .utility import (
+ _generate_circuit,
+ _generate_time_constants,
+)
+
+
def _complex_residual(
    params: "Parameters",  # noqa: F821
    circuit: Circuit,
    f: Frequencies,
    X_exp: NDArray[complex128],
    weight: NDArray[float64],
    identifiers: Dict[int, Element],
    admittance: bool,
) -> NDArray[float64]:
    """
    Residual function minimized by lmfit during a CNLS Kramers-Kronig fit.

    Updates the circuit's element values from ``params`` and compares the
    resulting spectrum against the experimental one.

    NOTE(review): the residuals are weighted *squared* differences of the
    real and imaginary parts rather than plain differences - confirm this is
    intentional for the chosen minimization method.
    """
    # Push the current parameter values into the circuit's elements.
    _from_lmfit(params, identifiers)
    # X is the impedance or, if admittance is True, the admittance (Z**-1).
    X_fit: ComplexImpedances = circuit.get_impedances(f) ** (-1 if admittance else 1)

    return weight * array(
        [
            (X_exp.real - X_fit.real) ** 2,
            (X_exp.imag - X_fit.imag) ** 2,
        ]
    )
+
+
def _test_wrapper(args: tuple) -> Tuple[int, Circuit]:
    """
    Perform a single CNLS Kramers-Kronig fit for a given number of RC
    elements. Designed to be called via multiprocessing with a packed
    argument tuple.

    Parameters
    ----------
    args: tuple
        (f, Z_exp, weight, num_RC, add_capacitance, add_inductance,
        admittance, log_F_ext, method, max_nfev)

    Returns
    -------
    Tuple[int, Circuit]
        The number of RC elements and the corresponding fitted circuit.
    """
    from lmfit import minimize
    from lmfit.minimizer import MinimizerResult

    f: Frequencies
    Z_exp: ComplexImpedances
    weight: NDArray[float64]
    num_RC: int
    add_capacitance: bool
    add_inductance: bool
    admittance: bool
    log_F_ext: float
    method: str
    max_nfev: int
    (
        f,
        Z_exp,
        weight,
        num_RC,
        add_capacitance,
        add_inductance,
        admittance,
        log_F_ext,
        method,
        max_nfev,
    ) = args

    if not _is_floating_array(f):
        raise TypeError(f"Expected an array of floats instead of {f=}")

    if not _is_complex_array(Z_exp):
        raise TypeError(f"Expected an array of complex values instead of {Z_exp=}")

    if not _is_floating_array(weight):
        raise TypeError(f"Expected an array of floats instead of {weight=}")

    if not _is_integer(num_RC):
        raise TypeError(f"Expected an integer instead of {num_RC=}")

    if not _is_boolean(add_capacitance):
        raise TypeError(f"Expected a boolean instead of {add_capacitance=}")

    # Fix: add_inductance was previously never validated (num_RC was
    # checked a second time instead).
    if not _is_boolean(add_inductance):
        raise TypeError(f"Expected a boolean instead of {add_inductance=}")

    if not _is_boolean(admittance):
        raise TypeError(f"Expected a boolean instead of {admittance=}")

    if not _is_floating(log_F_ext):
        raise TypeError(f"Expected a float instead of {log_F_ext=}")

    if not isinstance(method, str):
        raise TypeError(f"Expected a string instead of {method=}")

    if not _is_integer(max_nfev):
        raise TypeError(f"Expected an integer instead of {max_nfev=}")

    # Angular frequencies and the log-spaced time constants (optionally
    # extended by log_F_ext decades) that define the test circuit.
    w: NDArray[float64] = 2 * pi * f
    taus: NDArray[float64] = _generate_time_constants(w, num_RC, log_F_ext)
    circuit: Circuit = _generate_circuit(
        taus,
        add_capacitance,
        add_inductance,
        admittance,
    )

    # Map lmfit parameter identifiers back to circuit elements so the fitted
    # values can be written back after minimization.
    identifiers: Dict[int, Element] = {
        v: k for k, v in circuit.generate_element_identifiers(running=True).items()
    }

    fit: MinimizerResult = minimize(
        _complex_residual,
        _to_lmfit(identifiers),
        method,
        args=(
            circuit,
            f,
            # Fit against the admittance (Z**-1) when requested.
            Z_exp ** (-1 if admittance else 1),
            weight,
            identifiers,
            admittance,
        ),
        # lmfit interprets None as 'no limit'.
        max_nfev=None if max_nfev < 1 else max_nfev,
    )
    _from_lmfit(fit.params, identifiers)

    return (
        num_RC,
        circuit,
    )
diff --git a/src/pyimpspec/analysis/kramers_kronig/exploratory.py b/src/pyimpspec/analysis/kramers_kronig/exploratory.py
new file mode 100644
index 0000000..f1746dd
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/exploratory.py
@@ -0,0 +1,1453 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from collections import namedtuple
+from functools import partial
+from multiprocessing import Pool
+from multiprocessing.context import TimeoutError as MPTimeoutError
+from numpy import (
+ argmin,
+ argwhere,
+ array,
+ ceil,
+ diff,
+ float64,
+ int64,
+ isclose,
+ isnan,
+ linspace,
+ log10 as log,
+ mean,
+ zeros,
+)
+from numpy.typing import NDArray
+from pyimpspec.circuit.base import Element
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.data import DataSet
+from pyimpspec.exceptions import KramersKronigError
+from pyimpspec.progress import Progress
+from pyimpspec.typing import (
+ ComplexImpedances,
+ Frequencies,
+)
+from pyimpspec.typing.helpers import (
+ Callable,
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ _is_boolean,
+ _is_floating,
+ _is_integer,
+ _is_integer_list,
+)
+from pyimpspec.analysis.utility import (
+ _calculate_pseudo_chisqr,
+ _calculate_residuals,
+ _get_default_num_procs,
+)
+from .result import KramersKronigResult
+from .cnls import _test_wrapper as _cnls_test
+from .matrix_inversion import _test_wrapper as _inversion_test
+from .least_squares import _test_wrapper as _leastsq_test
+from .utility import (
+ _boukamp_weight,
+)
+from .algorithms import (
+ suggest_num_RC,
+ suggest_representation,
+)
+from .algorithms.utility.logistic import (
+ _logistic_derivative,
+ _logistic_function,
+)
+from .algorithms.utility.cubic import (
+ _cubic_function,
+ _fit_cubic_function,
+)
+from .algorithms.utility.pseudo_chi_squared import (
+ _approximate_transition_and_end_point,
+ _calculate_intercept_of_lines,
+ _fit_intersecting_lines,
+ _intersecting_lines_function,
+)
+
+
+_DEBUG = bool(0)
+
+
+_KKFits = namedtuple(
+ "_KKFits",
+ [
+ "log_F_ext",
+ "num_RCs",
+ "circuits",
+ "pseudo_chisqrs",
+ ],
+)
+
+
+def _use_matrix_inversion(
+ test: str,
+ f: Frequencies,
+ Z_exp: ComplexImpedances,
+ weight: NDArray[float64],
+ num_RCs: List[int],
+ add_capacitance: bool,
+ admittance: bool,
+ log_F_ext: float,
+ prog: Optional[Progress],
+) -> _KKFits:
+ supported_tests: List[str] = [
+ "complex",
+ "real",
+ "imaginary",
+ ]
+ if test not in supported_tests:
+ raise ValueError(f"{test=} is not among the valid values {supported_tests=}")
+
+ args = (
+ (
+ test,
+ f,
+ Z_exp,
+ weight,
+ num_RC,
+ add_capacitance,
+ admittance,
+ log_F_ext,
+ )
+ for num_RC in num_RCs
+ )
+ if prog:
+ prog.increment()
+ prog.set_message("Performing tests")
+
+ fits: List[Tuple[int, Circuit]] = []
+
+ res: Tuple[int, Circuit]
+ for res in map(_inversion_test, args):
+ fits.append(res)
+ if prog:
+ prog.increment()
+
+ fits.sort(key=lambda f: f[0])
+ weight = _boukamp_weight(Z_exp, admittance=False)
+ pseudo_chisqrs: List[float] = [
+ _calculate_pseudo_chisqr(
+ Z_exp,
+ circuit.get_impedances(f),
+ weight,
+ )
+ for (num_RC, circuit) in fits
+ ]
+
+ return _KKFits(
+ log_F_ext=log_F_ext,
+ num_RCs=[f[0] for f in fits],
+ circuits=[f[1] for f in fits],
+ pseudo_chisqrs=pseudo_chisqrs,
+ )
+
+
+def _use_cnls(
+    f: Frequencies,
+    Z_exp: ComplexImpedances,
+    weight: NDArray[float64],
+    automatically_limit_num_RC: bool,
+    num_RCs: List[int],
+    add_capacitance: bool,
+    add_inductance: bool,
+    admittance: bool,
+    log_F_ext: float,
+    method: str,
+    max_nfev: int,
+    num_procs: int,
+    timeout: int,
+    prog: Optional[Progress],
+) -> _KKFits:
+    """
+    Perform CNLS-based linear Kramers-Kronig tests for each candidate number
+    of RC elements in parallel, optionally stopping early once additional RC
+    elements no longer appear to improve the fits.
+    """
+    def calculate_log_sum_abs_tau_var(circuit: Circuit) -> float64:
+        # Heuristic used for early stopping: sums |tau / R| (or |tau / C| on
+        # the admittance representation) over the fitted elements.
+        key: str = "C" if admittance else "R"
+        total: float = 0.0
+
+        element: Element
+        for element in circuit.get_elements():
+            parameters: Dict[str, float] = element.get_values()
+            if "tau" in parameters and parameters[key] != 0.0:
+                total += abs(parameters["tau"] / parameters[key])
+
+        return log(total)
+
+    # Lazily generated argument tuples; the generator is shared with the pool
+    # below and drained afterwards to keep the progress bar consistent.
+    args = (
+        (
+            f,
+            Z_exp,
+            weight,
+            num_RC,
+            add_capacitance,
+            add_inductance,
+            admittance,
+            log_F_ext,
+            method,
+            max_nfev,
+        )
+        for num_RC in num_RCs
+    )
+    if prog is not None:
+        prog.increment()
+        prog.set_message("Performing tests")
+
+    fits: List[Tuple[int, Circuit]] = []
+    with Pool(num_procs) as pool:
+        threshold: Optional[float] = None
+        log_sum_abs_tau_var: Dict[int, float] = {}
+        max_count: int = 5
+
+        iterator = pool.imap(_cnls_test, args, 1)
+        while True:
+            try:
+                # Per-result timeout; a timeout with no completed fits at all
+                # is treated as a fatal error, otherwise the partial set of
+                # results is used.
+                res: Tuple[int, Circuit] = iterator.next(timeout=timeout)
+            except MPTimeoutError:
+                if len(fits) == 0:
+                    raise KramersKronigError(
+                        "Timed out before finishing the fitting process! Try increasing the timeout setting."
+                    )
+                break
+            except StopIteration:
+                break
+
+            fits.append(res)
+
+            if prog is not None:
+                prog.increment()
+
+            if not automatically_limit_num_RC:
+                continue
+
+            log_sum_abs_tau_var[fits[-1][0]] = calculate_log_sum_abs_tau_var(res[1])
+            # The threshold is frozen once five fits have been collected.
+            if (threshold is None) and (len(fits) >= 5):
+                threshold = min(list(log_sum_abs_tau_var.values()))
+
+            # NOTE(review): the early-stop check is skipped while fewer than
+            # ten fits exist and also whenever len(fits) is a multiple of
+            # five — presumably to limit how often the check runs; confirm.
+            if (len(fits) < 10) or (len(fits) % 5 == 0):
+                continue,
+
+            count: int = sum(
+                (1 for f in fits[-max_count:] if log_sum_abs_tau_var[f[0]] < threshold)
+            )
+            if count >= max_count:
+                break
+
+    # Drain any argument tuples that were never submitted so the progress
+    # bar still reaches its expected total after an early stop.
+    while True:
+        try:
+            next(args)
+        except StopIteration:
+            break
+
+        if prog is not None:
+            prog.increment()
+
+    fits.sort(key=lambda f: f[0])
+    # Pseudo chi-squared values are always computed on the impedance
+    # representation, hence admittance=False here.
+    weight = _boukamp_weight(Z_exp, admittance=False)
+    pseudo_chisqrs: List[float] = [
+        _calculate_pseudo_chisqr(
+            Z_exp,
+            circuit.get_impedances(f),
+            weight,
+        )
+        for (num_RC, circuit) in fits
+    ]
+
+    return _KKFits(
+        log_F_ext=log_F_ext,
+        num_RCs=[f[0] for f in fits],
+        circuits=[f[1] for f in fits],
+        pseudo_chisqrs=pseudo_chisqrs,
+    )
+
+
+def _use_least_squares_fitting(
+ test: str,
+ f: Frequencies,
+ Z_exp: ComplexImpedances,
+ weight: NDArray[float64],
+ num_RCs: List[int],
+ add_capacitance: bool,
+ add_inductance: bool,
+ admittance: bool,
+ log_F_ext: float,
+ prog: Optional[Progress],
+) -> _KKFits:
+ supported_tests: List[str] = [
+ "complex",
+ "real",
+ "imaginary",
+ ]
+ if test not in supported_tests:
+ raise ValueError(f"{test=} is not among the valid values {supported_tests=}")
+
+ args = (
+ (
+ test,
+ f,
+ Z_exp,
+ weight,
+ num_RC,
+ add_capacitance,
+ add_inductance,
+ admittance,
+ log_F_ext,
+ )
+ for num_RC in num_RCs
+ )
+ if prog:
+ prog.increment()
+ prog.set_message("Performing tests")
+
+ fits: List[Tuple[int, Circuit]] = []
+
+ res: Tuple[int, Circuit]
+ for res in map(_leastsq_test, args):
+ fits.append(res)
+ if prog:
+ prog.increment()
+
+ fits.sort(key=lambda f: f[0])
+ weight = _boukamp_weight(Z_exp, admittance=False)
+ pseudo_chisqrs: List[float] = [
+ _calculate_pseudo_chisqr(
+ Z_exp,
+ circuit.get_impedances(f),
+ weight,
+ )
+ for (num_RC, circuit) in fits
+ ]
+
+ return _KKFits(
+ log_F_ext=log_F_ext,
+ num_RCs=[f[0] for f in fits],
+ circuits=[f[1] for f in fits],
+ pseudo_chisqrs=pseudo_chisqrs,
+ )
+
+
+def _perform_tests(
+ test: str,
+ f: Frequencies,
+ Z_exp: ComplexImpedances,
+ weight: NDArray[float64],
+ automatically_limit_num_RC: bool,
+ num_RCs: List[int],
+ add_capacitance: bool,
+ add_inductance: bool,
+ admittance: bool,
+ log_F_ext: float,
+ cnls_method: str,
+ max_nfev: int,
+ num_procs: int,
+ timeout: int,
+ prog: Progress,
+ **kwargs,
+) -> _KKFits:
+ fits: _KKFits
+ if test == "cnls":
+ fits = _use_cnls(
+ f,
+ Z_exp,
+ weight,
+ automatically_limit_num_RC,
+ num_RCs,
+ add_capacitance,
+ add_inductance,
+ admittance,
+ log_F_ext,
+ cnls_method,
+ max_nfev,
+ num_procs,
+ timeout,
+ prog,
+ )
+ elif test.endswith("-inv"):
+ fits = _use_matrix_inversion(
+ test.replace("-inv", ""),
+ f,
+ Z_exp,
+ weight,
+ num_RCs,
+ add_capacitance,
+ admittance,
+ log_F_ext,
+ prog,
+ )
+ else:
+ fits = _use_least_squares_fitting(
+ test,
+ f,
+ Z_exp,
+ weight,
+ num_RCs,
+ add_capacitance,
+ add_inductance,
+ admittance,
+ log_F_ext,
+ prog,
+ )
+
+ return fits
+
+
+def _wrapper(args: tuple) -> _KKFits:
+ (
+ log_F_ext,
+ kwargs,
+ ) = args
+ kwargs["log_F_ext"] = log_F_ext
+ fits: _KKFits = _perform_tests(**kwargs)
+
+ return fits
+
+
+def _calculate_statistic(
+ fits: _KKFits,
+ f: Frequencies,
+ test: str,
+ target_num_RC: int,
+) -> float:
+ x: NDArray[float64] = array([num_RC for num_RC in fits.num_RCs], dtype=float64)
+ y: NDArray[float64] = log(fits.pseudo_chisqrs)
+
+ if target_num_RC > 0:
+ i: int = argmin(abs(x - target_num_RC))
+
+ return mean(y[i:i + 2])
+
+ # Unable to estimate the target num_RC at
+ # log(fits.log_F_ext) = 0 for some reason.
+ intercept_x: int
+ max_x: int
+ p: Tuple[float, float, float, float]
+ intercept_x, max_x, p = _approximate_transition_and_end_point(x, y)
+
+ min_x: float64 = min(x)
+ if isnan(p).any():
+ y = zeros(3, dtype=float64)
+ for i, _x in enumerate((min_x, intercept_x, max_x)):
+ y[i] = y[argmin(abs(x - _x)).flatten()]
+ else:
+ x = array([min_x, intercept_x, max_x], dtype=float64)
+ y = _intersecting_lines_function(x, *p)
+
+ x /= len(f)
+ y -= y[2]
+ if y[0] != 0.0:
+ y /= y[0]
+
+ return x[1] ** 2 + abs(y[1])
+
+
+def _log_F_ext_residual(
+ params: "Parameters", # noqa: F821
+ kwargs: dict,
+ cache: List[Tuple[_KKFits, float]],
+ prog: Optional[Progress],
+) -> float:
+ log_F_ext: float = params.valuesdict()["log_F_ext"]
+ fits: _KKFits = _wrapper(
+ (
+ log_F_ext,
+ kwargs,
+ )
+ )
+ statistic: float = _calculate_statistic(
+ fits,
+ f=kwargs["f"],
+ test=kwargs["test"],
+ target_num_RC=kwargs["target_num_RC"],
+ )
+
+ cache.append((fits, statistic))
+ if prog is not None:
+ prog.increment()
+
+ return statistic
+
+
+def _evaluate_log_F_ext_using_lmfit(
+    min_log_F_ext: float,
+    max_log_F_ext: float,
+    num_F_ext_evaluations: int,
+    rapid_F_ext_evaluations: bool,
+    wrapper_kwargs: dict,
+    prog: Progress,
+    method: str = "differential_evolution",
+) -> List[Tuple[_KKFits, float]]:
+    """
+    Optimize log_F_ext with lmfit by minimizing _log_F_ext_residual.
+    Every evaluation performed by the optimizer is accumulated in
+    'evaluations' (via the cache argument of the residual function) and
+    the full list is returned.
+    """
+    from lmfit import minimize, Parameters
+
+    if not (min_log_F_ext <= 0.0 < max_log_F_ext):
+        raise ValueError(f"Expected {min_log_F_ext=} <= 0.0 < {max_log_F_ext=}")
+
+    # max_nfev is None when num_F_ext_evaluations <= 0, in which case the
+    # optimizer runs without an evaluation budget and the progress bar is
+    # advanced manually at fixed checkpoints instead of per evaluation.
+    max_nfev: Optional[int] = (
+        num_F_ext_evaluations if num_F_ext_evaluations > 0 else None
+    )
+    if not max_nfev:
+        prog.increment()
+
+    # Perform entirely using least squares fitting
+    parameters = Parameters()
+    parameters.add(
+        "log_F_ext",
+        value=min((0.1, max_log_F_ext / 2)),
+        min=min_log_F_ext,
+        max=max_log_F_ext,
+    )
+    if not max_nfev:
+        prog.increment()
+
+    evaluations: List[Tuple[_KKFits, float]] = []
+
+    # Baseline at log_F_ext == 0.0 is used to estimate the target num_RC,
+    # which in turn is used to score all other log_F_ext values.
+    baseline_result: _KKFits = _wrapper((0.0, wrapper_kwargs))
+    target_num_RC: int = _estimate_target_num_RC(baseline_result)
+    wrapper_kwargs["target_num_RC"] = target_num_RC
+
+    # Optionally restrict the tested num_RC range to speed up the search;
+    # the original list is restored for the best candidate further below.
+    num_RCs: List[int] = wrapper_kwargs["num_RCs"][:]
+    if rapid_F_ext_evaluations and target_num_RC > 0:
+        wrapper_kwargs["num_RCs"] = [
+            num_RC
+            for num_RC in num_RCs
+            if num_RC <= min((max(num_RCs), target_num_RC + 5))
+        ]
+
+    evaluations.append(
+        (
+            baseline_result,
+            _calculate_statistic(
+                baseline_result,
+                f=wrapper_kwargs["f"],
+                test=wrapper_kwargs["test"],
+                target_num_RC=wrapper_kwargs["target_num_RC"],
+            ),
+        )
+    )
+
+    if not max_nfev:
+        prog.increment()
+
+    # The return value of minimize is intentionally unused: all evaluated
+    # candidates are captured in 'evaluations' through the cache argument.
+    minimize(
+        _log_F_ext_residual,
+        parameters,
+        # Many of the other methods tend to get stuck on local minima,
+        # but the following work quite well
+        # - "differential_evolution"
+        # - "powell"
+        # - "slsqp"
+        # - "bfgs"
+        method=method,
+        args=(
+            wrapper_kwargs,
+            evaluations,
+            prog if max_nfev else None,
+        ),
+        max_nfev=max_nfev,
+    )
+    if not max_nfev:
+        prog.increment()
+
+    # If the num_RC range was restricted above, re-run the best candidate
+    # with the full range (its statistic from the restricted run is kept).
+    if rapid_F_ext_evaluations and set(num_RCs) != set(wrapper_kwargs["num_RCs"]):
+        wrapper_kwargs["num_RCs"] = num_RCs
+        evaluations = sorted(evaluations, key=lambda e: e[1])
+        fits, statistic = evaluations.pop(0)
+        fits = _wrapper((fits.log_F_ext, wrapper_kwargs))
+        evaluations.insert(0, (fits, statistic))
+
+    if _DEBUG:  # TODO: User-configurable setting?
+        _debug_plot_statistic(evaluations)
+
+    return evaluations
+
+
+def _debug_plot_statistic(evaluations: List[Tuple[_KKFits, float]]):
+ x = []
+ y = []
+ for kk, stat in sorted(evaluations, key=lambda e: e[0].log_F_ext):
+ x.append(kk.log_F_ext)
+ y.append(stat)
+
+ import matplotlib.pyplot as plt
+
+ fig, ax = plt.subplots()
+ ax.scatter(x, y)
+
+ ax.set_xlabel(r"$\log{F_{\rm ext}}$")
+ ax.set_ylabel(r"statistic")
+
+ plt.show()
+
+
+def _fit_cubic_and_interpolate(
+ x: NDArray[float64],
+ y: NDArray[float64],
+) -> Tuple[NDArray[float64], NDArray[float64]]:
+ j: int = argmin(y)
+ delta: int = 2
+ if not (delta % 2 == 0):
+ raise ValueError(f"Expected {delta % 2=} == 0")
+
+ i: int = j - delta
+ k: int = j + delta
+ if i < 0:
+ i = 0
+ k = min((i + 2 * delta, len(x) - 1))
+ elif k > len(x) - 1:
+ k = len(x) - 1
+ i = max((0, k - 2 * delta))
+
+ if (k - i) == (2 * delta) and (k - i) > 3:
+ x = x[i:k + 1]
+ y = y[i:k + 1]
+
+ p: Tuple[float, float, float, float] = _fit_cubic_function(x, y)
+
+ x_interp: NDArray[float64] = linspace(
+ min(x),
+ max(x),
+ num=int(round((max(x) - min(x)) * 100)),
+ )
+
+ return (
+ x_interp,
+ _cubic_function(x_interp, *p),
+ )
+
+
+def _pick_minimum(
+ x: List[float],
+ y: List[float],
+ x_interp: NDArray[float64],
+ y_interp: NDArray[float64],
+) -> float:
+ from scipy.signal import argrelmin
+
+ candidates: List[Tuple[float, float]] = []
+
+ i: int = argmin(y)
+ candidates.append((x[i], y[i]))
+
+ if len(y_interp) > 0:
+ i = argmin(y_interp)
+ candidates.append((x_interp[i], y_interp[i]))
+
+ indices: NDArray[int64] = argrelmin(y_interp)[0]
+ if len(indices) > 0:
+ minima: NDArray[float64] = y_interp[indices]
+ i = indices[argmin(minima)]
+ candidates.append((x_interp[i], y_interp[i]))
+
+ return min(candidates, key=lambda xy: xy[1])[0]
+
+
+def _fit_logistic_function(
+ x: NDArray[float64],
+ y: NDArray[float64],
+ p0: Optional[Tuple[float, ...]] = None,
+ bounds: Optional[Tuple[NDArray[float64], NDArray[float64]]] = None,
+) -> Tuple[float, ...]:
+ from scipy.optimize import curve_fit
+
+ kwargs = {}
+ if p0 is None:
+ p0 = [
+ -1.0,
+ 2.0,
+ min((10.0, (max(x) - min(x)) / 2.0)),
+ 1.0,
+ ]
+ kwargs["p0"] = p0
+ if bounds is not None:
+ kwargs["bounds"] = bounds
+
+ p: Tuple[float, ...] = curve_fit(
+ _logistic_function,
+ x,
+ y,
+ **kwargs,
+ )[0]
+
+ return p
+
+
+def _estimate_target_num_RC(baseline_result: _KKFits) -> int:
+    """
+    Estimate the number of RC elements to target when scoring log_F_ext
+    candidates. Returns a negative sentinel (-1, -2, or -3) when an estimate
+    cannot be produced.
+    """
+    # The slope of the first few points at 'log_F_ext == 0.0'
+    # should provide a decent estimate for the num_RC to target when
+    # extrapolated to the lowest log(X²ps) value obtained when the
+    # tests were performed at 'log_F_ext == 0.0'.
+    from scipy.stats import linregress
+    from pandas import Series
+
+    def backup_approach(x: NDArray[float64], y: NDArray[float64]):
+        # Fallback based on fitting two intersecting lines; returns distinct
+        # negative sentinels so the failure mode can be told apart.
+        try:
+            p = _fit_intersecting_lines(x, y)
+        except (ValueError, ZeroDivisionError):
+            return -2
+
+        try:
+            return int(ceil(_calculate_intercept_of_lines(p[0], p[1], 0.0, min(y))))
+        except (ValueError, ZeroDivisionError):
+            return -3
+
+    # The approach requires a contiguous range of num_RC values.
+    if not (diff(baseline_result.num_RCs) == 1).all():
+        return -1
+
+    x: NDArray[float64] = array(baseline_result.num_RCs, dtype=float64)
+    # NOTE(review): pseudo_chisqrs is a list here, so the slicing and the
+    # concatenation below are list operations until log() converts to array.
+    y: NDArray[float64] = baseline_result.pseudo_chisqrs[:]
+    y = y[: argmin(y) + 1]
+    if len(y) < len(x):
+        # Pad past the minimum with the minimum value so len(y) == len(x).
+        y = y + [min(y)] * (len(baseline_result.pseudo_chisqrs) - len(y))
+    y = log(y)
+
+    # Some test implementations may have a very significant drop in
+    # X²ps at high num_RC when the number of points per decade is low.
+    i: int = len(y) - 1
+    while (y[i - 1] - y[i]) > 1.0:
+        i -= 1
+
+    x = x[: i + 1]
+    y = y[: i + 1]
+
+    # Rolling standard deviation of log(X²ps), normalized to [0, 1].
+    std = Series(y).rolling(3, center=True, min_periods=1).std()
+    rel_std = (std - min(std)) / (max(std) - min(std))
+
+    try:
+        p: Tuple[float, ...] = _fit_logistic_function(x, rel_std)
+    except (RuntimeError, ValueError):
+        return backup_approach(x, y)
+
+    if _DEBUG:
+        import matplotlib.pyplot as plt
+
+        fig, ax1 = plt.subplots()
+        ax2 = ax1.twinx()
+
+        ax1.scatter(x, y, edgecolor="black", facecolor="none", marker="o")
+        ax2.scatter(x, rel_std, color="red", marker="+")
+
+        smooth_x = linspace(min(x), max(x), num=int(ceil(max(x) - min(x))) * 100)
+        ax2.plot(
+            smooth_x,
+            _logistic_function(smooth_x, *p),
+            color="red",
+            linestyle="-",
+        )
+
+        smooth_y = _logistic_derivative(smooth_x, *p)
+        ax2.plot(
+            smooth_x,
+            (smooth_y - min(smooth_y)) / (max(smooth_y) - min(smooth_y)),
+            color="blue",
+            linestyle="--",
+        )
+
+    # Index of the point closest to the logistic midpoint p[2].
+    i = argmin(abs(x - int(ceil(p[2]))))
+
+    if len(x[:i]) < 5:
+        # Too few points before the midpoint; extrapolate the tangent at the
+        # midpoint down to the minimum of rel_std instead.
+        slope = _logistic_derivative(p[2], *p)
+        intercept = _logistic_function(p[2], *p) - slope * p[2]
+        i = argmin(abs(x - (min(rel_std) - intercept) / slope))
+
+        if _DEBUG:
+            m = min(argwhere(smooth_y <= max(rel_std)).flatten())
+            n = max(argwhere(smooth_y >= min(rel_std)).flatten())
+            ax2.plot(
+                smooth_x[m:n + 1],
+                smooth_y[m:n + 1],
+                color="green",
+                linestyle=":",
+            )
+
+    if len(x[:i]) < 4:
+        if _DEBUG:
+            plt.close()
+
+        return backup_approach(x, y)
+
+    # Linear regression over the initial, steadily-decreasing region.
+    regression = linregress(x[:i], y[:i])
+    slope: float = regression.slope
+    intercept: float = regression.intercept
+    if slope >= 0.0:
+        # A non-negative slope means the extrapolation below is meaningless.
+        if _DEBUG:
+            plt.close()
+
+        return backup_approach(x, y)
+
+    if _DEBUG:
+        smooth_x = linspace(min(x), max(x), num=int(ceil(max(x) - min(x))) * 100)
+        smooth_y = slope * smooth_x + intercept
+        m = min(argwhere(smooth_y <= max(y)).flatten())
+        n = max(argwhere(smooth_y >= min(y)).flatten())
+        ax1.plot(
+            smooth_x[m:n + 1],
+            smooth_y[m:n + 1],
+            color="black",
+            linestyle=":",
+        )
+        ax1.axvline(x[i] - 0.5, color="black", linestyle="--")
+
+    # Find the lowest y-value whose x lies within the fitted end point;
+    # NOTE(review): assumes at least one such point exists, otherwise 'xy'
+    # is simply the highest-y pair — confirm this is intended.
+    max_x: int
+    _, max_x, _ = _approximate_transition_and_end_point(x, y)
+    for xy in sorted(zip(x, y), key=lambda xy: xy[1]):
+        if xy[0] <= max_x:
+            break
+
+    # Extrapolate the regression line down to that y-value.
+    target_num_RC: int = (
+        int(
+            ceil(
+                _calculate_intercept_of_lines(
+                    slope,
+                    intercept,
+                    0.0,
+                    xy[1],
+                )
+            )
+        )
+        + 1
+    )
+
+    if _DEBUG:
+        print(f"{x[i]=}, {target_num_RC=}")
+        ax1.set_xlim(0, min((p[2] + 20, max(x))))
+        plt.show()
+
+    return target_num_RC
+
+
+def _evaluate_log_F_ext_using_custom_approach(
+    min_log_F_ext: float,
+    max_log_F_ext: float,
+    num_F_ext_evaluations: int,
+    rapid_F_ext_evaluations: bool,
+    wrapper_kwargs: dict,
+    prog: Progress,
+    _map: Callable,
+) -> List[Tuple[_KKFits, float]]:
+    """
+    Two-stage grid search for the optimal log_F_ext: a coarse sweep over
+    [min_log_F_ext, max_log_F_ext] followed by a refined sweep around the
+    coarse minimum. '_map' is either the builtin map or Pool.map.
+    """
+    if not (min_log_F_ext <= 0.0 < max_log_F_ext):
+        raise ValueError(f"Expected {min_log_F_ext=} <= 0.0 < {max_log_F_ext=}")
+
+    # Baseline at log_F_ext == 0.0 provides the target num_RC used when
+    # scoring all of the other candidates.
+    baseline_result: _KKFits = _wrapper((0.0, wrapper_kwargs))
+    target_num_RC: int = _estimate_target_num_RC(baseline_result)
+    wrapper_kwargs["target_num_RC"] = target_num_RC
+
+    # Optionally restrict the tested num_RC range to speed up the search;
+    # the original list is restored for the best candidate further below.
+    num_RCs: List[int] = wrapper_kwargs["num_RCs"][:]
+    if rapid_F_ext_evaluations and target_num_RC > 0:
+        wrapper_kwargs["num_RCs"] = [
+            num_RC
+            for num_RC in num_RCs
+            if num_RC <= min((max(num_RCs), target_num_RC + 5))
+        ]
+
+    prog.increment()
+
+    # Find the approximate location of the minimum
+    stage_1_num_F_ext_evaluations: int = int(ceil(num_F_ext_evaluations / 2)) + 1
+    stage_1: NDArray[float64] = linspace(
+        min_log_F_ext,
+        max_log_F_ext,
+        num=stage_1_num_F_ext_evaluations,
+    )
+
+    stage_1_results: List[_KKFits] = []
+    log_F_ext: float64
+    # log_F_ext == 0.0 is skipped since the baseline already covers it.
+    for res in _map(
+        _wrapper,
+        (
+            (log_F_ext, wrapper_kwargs)
+            for log_F_ext in stage_1
+            if not isclose(log_F_ext, 0.0)
+        ),
+    ):
+        stage_1_results.append(res)
+        prog.increment()
+
+    step_size: float64 = abs(stage_1[1] - stage_1[0])
+    stage_1_results.append(baseline_result)
+    stage_1_results.sort(key=lambda kk: kk.log_F_ext)
+
+    x: List[float] = []
+    y: List[float] = []
+    for fits in sorted(stage_1_results, key=lambda res: res.log_F_ext):
+        x.append(fits.log_F_ext)
+        y.append(
+            _calculate_statistic(
+                fits,
+                f=wrapper_kwargs["f"],
+                test=wrapper_kwargs["test"],
+                target_num_RC=wrapper_kwargs["target_num_RC"],
+            )
+        )
+
+    x_interp_1: NDArray[float64]
+    y_interp_1: NDArray[float64]
+    x_interp_1, y_interp_1 = _fit_cubic_and_interpolate(x, y)
+
+    stage_1_minimum: float64
+    stage_1_minimum = _pick_minimum(x, y, x_interp_1, y_interp_1)
+
+    # Refine the range near the approximated minimum
+    min_log_F_ext = max(
+        (
+            min_log_F_ext,
+            stage_1_minimum - step_size / 2,
+        )
+    )
+    max_log_F_ext = min(
+        (
+            max_log_F_ext,
+            min_log_F_ext + step_size,
+        )
+    )
+
+    # Remaining evaluation budget after stage 1.
+    stage_2_num_F_ext_evaluations: int = num_F_ext_evaluations - len(stage_1_results)
+
+    x.clear()
+    # Interior points of the refined range, excluding any already evaluated
+    # in stage 1 (within an absolute tolerance).
+    for log_F_ext in linspace(
+        min_log_F_ext,
+        max_log_F_ext,
+        num=max((3, stage_2_num_F_ext_evaluations + 1)),
+    )[1:-1]:
+        if not isclose(log_F_ext, stage_1, atol=1e-4).any():
+            x.append(log_F_ext)
+
+    stage_2: NDArray[float64] = array(x)
+    stage_2_results: List[_KKFits] = []
+    for res in _map(
+        _wrapper,
+        ((log_F_ext, wrapper_kwargs) for log_F_ext in stage_2),
+    ):
+        stage_2_results.append(res)
+        prog.increment()
+
+    intermediate_results: List[_KKFits] = stage_1_results + stage_2_results
+
+    x.clear()
+    y.clear()
+    for fits in sorted(intermediate_results, key=lambda res: res.log_F_ext):
+        x.append(fits.log_F_ext)
+        y.append(
+            _calculate_statistic(
+                fits,
+                f=wrapper_kwargs["f"],
+                test=wrapper_kwargs["test"],
+                target_num_RC=wrapper_kwargs["target_num_RC"],
+            ),
+        )
+
+    x_interp_2: NDArray[float64]
+    y_interp_2: NDArray[float64]
+    x_interp_2, y_interp_2 = _fit_cubic_and_interpolate(x, y)
+
+    stage_2_minimum: float64
+    stage_2_minimum = _pick_minimum(x, y, x_interp_2, y_interp_2)
+
+    # Evaluate the interpolated minimum itself unless it coincides with an
+    # already-evaluated candidate.
+    if not (
+        isclose(stage_2_minimum, stage_1, atol=1e-4).any()
+        or isclose(stage_2_minimum, stage_2, atol=1e-4).any()
+    ):
+        intermediate_results.append(_wrapper((stage_2_minimum, wrapper_kwargs)))
+
+    evaluations: List[Tuple[_KKFits, float]] = []
+    for fits in intermediate_results:
+        evaluations.append(
+            (
+                fits,
+                _calculate_statistic(
+                    fits,
+                    f=wrapper_kwargs["f"],
+                    test=wrapper_kwargs["test"],
+                    target_num_RC=wrapper_kwargs["target_num_RC"],
+                ),
+            )
+        )
+
+    # If the num_RC range was restricted above, re-run the best candidate
+    # with the full range (its statistic from the restricted run is kept).
+    if rapid_F_ext_evaluations and set(num_RCs) != set(wrapper_kwargs["num_RCs"]):
+        wrapper_kwargs["num_RCs"] = num_RCs
+        evaluations = sorted(evaluations, key=lambda e: e[1])
+        fits, statistic = evaluations.pop(0)
+        fits = _wrapper((fits.log_F_ext, wrapper_kwargs))
+        evaluations.insert(0, (fits, statistic))
+
+    if _DEBUG:  # TODO: User-configurable setting?
+        _debug_plot_statistic(evaluations)
+
+    return evaluations
+
+
+def evaluate_log_F_ext(
+ data: DataSet,
+ test: str = "real",
+ num_RCs: Optional[List[int]] = None,
+ add_capacitance: bool = True,
+ add_inductance: bool = True,
+ admittance: bool = False,
+ min_log_F_ext: float = -1.0,
+ max_log_F_ext: float = 1.0,
+ log_F_ext: float = 0.0,
+ num_F_ext_evaluations: int = 20,
+ rapid_F_ext_evaluations: bool = True,
+ cnls_method: str = "leastsq",
+ max_nfev: int = 0,
+ timeout: int = 60,
+ num_procs: int = -1,
+ **kwargs,
+) -> List[Tuple[float, List[KramersKronigResult], float]]:
+ """
+ Evaluates extensions (or contractions) of the range of time constants in order to find an optimum.
+ Linear Kramers-Kronig tests are performed at various ranges of time constants.
+ The limits of the default range are defined by the reciprocals of the maximum and minimum excitation frequencies.
+ The lower and upper limits of the extended (or contracted) ranges are defined as :math:`\\tau_{\\rm min} = 1/(F_{\\rm ext}\\omega_{\\rm max})` and :math:`\\tau_{\\rm max} = F_{\\rm ext}/\\omega_{\\rm min}`, respectively.
+
+ References:
+
+ - V. Yrjänä and J. Bobacka, 2024, Electrochim. Acta, 504, 144951 (https://doi.org/10.1016/j.electacta.2024.144951)
+ - B.A. Boukamp, 1995, J. Electrochem. Soc., 142, 1885-1894 (https://doi.org/10.1149/1.2044210)
+
+ Parameters
+ ----------
+ data: DataSet
+ The data set to be tested.
+
+ test: str, optional
+ See |perform_kramers_kronig_test| for details.
+
+ num_RCs: Optional[List[int]], optional
+ See |perform_exploratory_kramers_kronig_tests| for details.
+
+ add_capacitance: bool, optional
+ See |perform_kramers_kronig_test| for details.
+
+ add_inductance: bool, optional
+ See |perform_kramers_kronig_test| for details.
+
+ admittance: bool, optional
+ Perform the linear Kramers-Kronig test on the admittance representation instead of the impedance representation.
+
+ min_log_F_ext: float, optional
+ See |perform_kramers_kronig_test| for details.
+
+ max_log_F_ext: float, optional
+ See |perform_kramers_kronig_test| for details.
+
+ log_F_ext: float, optional
+ See |perform_kramers_kronig_test| for details.
+
+ num_F_ext_evaluations: int, optional
+ See |perform_kramers_kronig_test| for details.
+
+ rapid_F_ext_evaluations: bool, optional
+ See |perform_kramers_kronig_test| for details.
+
+ cnls_method: str, optional
+ See |perform_kramers_kronig_test| for details.
+
+ max_nfev: int, optional
+ See |perform_kramers_kronig_test| for details.
+
+ timeout: int, optional
+ See |perform_kramers_kronig_test| for details.
+
+ num_procs: int, optional
+ See |perform_kramers_kronig_test| for details.
+
+ **kwargs
+
+ Returns
+ -------
+ List[Tuple[float, List[KramersKronigResult], float]]
+ A list of tuples where the first element is the extension in terms of decades beyond the default range, the second element is a list of linear Kramers-Kronig test results performed with a different number of RC elements (i.e., time constants), and the third element is the statistic indicating the quality of the extension (the smaller the better). The list of tuples is sorted from best to worst. The lists of |KramersKronigResult| instances are sorted from lowest to highest number of RC elements, and the optimal number of RC elements still needs to be determined.
+ """
+ if not isinstance(test, str):
+ raise TypeError(f"Expected a string instead of {test=}")
+
+ if not _is_boolean(add_capacitance):
+ raise TypeError(f"Expected a boolean instead of {add_capacitance=}")
+
+ if not _is_boolean(add_inductance):
+ raise TypeError(f"Expected a boolean instead of {add_inductance=}")
+ elif not add_inductance and test.endswith("-inv"):
+ raise ValueError(
+ "The tests implemented using matrix inversion must include the series/parallel inductance"
+ )
+
+ if not _is_boolean(admittance):
+ raise TypeError(f"Expected a boolean instead of {admittance=}")
+
+ if num_RCs is None:
+ num_RCs = []
+ elif not _is_integer_list(num_RCs):
+ raise TypeError(f"Expected None or a list of integers instead of {num_RCs=}")
+
+ if not _is_floating(min_log_F_ext):
+ raise TypeError(f"Expected a float instead of {max_log_F_ext=}")
+ elif min_log_F_ext > 0.0:
+ raise ValueError(f"Expected {min_log_F_ext=} <= 0.0")
+
+ if not _is_floating(max_log_F_ext):
+ raise TypeError(f"Expected a float instead of {max_log_F_ext=}")
+ elif max_log_F_ext <= 0.0:
+ raise ValueError(f"Expected {max_log_F_ext=} > 0.0")
+
+ if not _is_floating(log_F_ext):
+ raise TypeError(f"Expected a float instead of {log_F_ext=}")
+
+ if not _is_integer(num_F_ext_evaluations):
+ raise TypeError(f"Expected an integer instead of {num_F_ext_evaluations=}")
+
+ if not _is_boolean(rapid_F_ext_evaluations):
+ raise TypeError(f"Expected a boolean instead of {rapid_F_ext_evaluations=}")
+
+ if not isinstance(cnls_method, str):
+ raise TypeError(f"Expected a string instead of {cnls_method=}")
+
+ if not _is_integer(max_nfev):
+ raise TypeError(f"Expected an integer instead of {max_nfev=}")
+
+ if not _is_integer(timeout):
+ raise TypeError(f"Expected an integer instead of {timeout=}")
+
+ if not _is_integer(num_procs):
+ raise TypeError(f"Expected an integer instead of {num_procs=}")
+ elif num_procs < 1:
+ num_procs = max((_get_default_num_procs() - abs(num_procs), 1))
+
+ f: Frequencies = data.get_frequencies()
+ Z_exp: ComplexImpedances = data.get_impedances()
+
+ automatically_limit_num_RC: bool = len(num_RCs) == 0
+ num_points: int = len(f)
+ max_num_RC: int = 2 * num_points - 5
+ if test.endswith("-inv"):
+ max_num_RC = min((num_points + 10, max_num_RC))
+
+ if len(num_RCs) > 0 and max(num_RCs) > max_num_RC:
+ raise KramersKronigError(
+ f"The maximum value of num_RCs must be less than or equal to {max_num_RC}"
+ )
+
+ if automatically_limit_num_RC:
+ num_RCs = list(range(2, max_num_RC + 1))
+ else:
+ num_RCs = sorted(num_RCs)
+
+ if not all(map(lambda n: 2 <= n <= max_num_RC, num_RCs)):
+ raise KramersKronigError(
+ f"Expected all values in num_RCs to be withing the range [2,{max_num_RC}] instead of {num_RCs}"
+ )
+
+ num_steps: int = 2 # Calculating weight and preparing arguments
+
+ estimate_log_F_ext: bool = num_F_ext_evaluations != 0
+ if estimate_log_F_ext:
+ if not automatically_limit_num_RC:
+ raise ValueError(
+ "Expected the range of RC elements to be determined automatically when evaluating extensions of the range of time constants (i.e., expected 'num_RCs == []' when 'num_F_ext_evaluations != 0')"
+ )
+ elif abs(num_F_ext_evaluations) < 10:
+ raise ValueError(
+ f"Expected at least 10 evaluations instead of {abs(num_F_ext_evaluations)=} for the optimization of log F_ext"
+ )
+
+ if num_F_ext_evaluations < 0:
+ num_steps += abs(num_F_ext_evaluations) + 2
+ else:
+ num_steps += abs(num_F_ext_evaluations)
+ else:
+ num_steps += len(num_RCs)
+
+ prog: Progress
+ with Progress(
+ "Preparing arguments",
+ total=num_steps + 1,
+ N=(5 if test == "cnls" else 10),
+ ) as prog:
+ weight: NDArray[float64] = _boukamp_weight(Z_exp, admittance=admittance)
+ prog.increment()
+
+ wrapper_kwargs = dict(
+ test=test,
+ f=f,
+ Z_exp=Z_exp,
+ weight=weight,
+ automatically_limit_num_RC=automatically_limit_num_RC,
+ num_RCs=num_RCs,
+ add_capacitance=add_capacitance,
+ add_inductance=add_inductance,
+ admittance=admittance,
+ cnls_method=cnls_method,
+ max_nfev=max_nfev,
+ num_procs=num_procs,
+ timeout=timeout,
+ prog=prog if not estimate_log_F_ext else None,
+ )
+
+ evaluations: List[Tuple[_KKFits, float]]
+ if not estimate_log_F_ext:
+ fits: _KKFits = _perform_tests(
+ log_F_ext=log_F_ext,
+ **wrapper_kwargs,
+ )
+ evaluations = [(fits, 0.0)]
+ else:
+ prog.set_message("Evaluating time constant ranges")
+ evaluation_kwargs: dict = dict(
+ min_log_F_ext=min_log_F_ext,
+ max_log_F_ext=max_log_F_ext,
+ num_F_ext_evaluations=abs(num_F_ext_evaluations),
+ rapid_F_ext_evaluations=rapid_F_ext_evaluations,
+ wrapper_kwargs=wrapper_kwargs,
+ prog=prog,
+ )
+ if num_F_ext_evaluations <= 0:
+ evaluations = _evaluate_log_F_ext_using_lmfit(**evaluation_kwargs)
+ elif num_procs > 1:
+ # TODO: Figure out why this causes a RuntimeError related to
+ # the matplotlib window. Tends to happen when using the CLI and
+ # several windows have been shown. The same doesn't happen when,
+ # e.g., performing multiple fits in a row via the CLI.
+ # EDIT: Seems to be related to using an interactive
+ # matplotlib backend such as TkAgg. Probably need to figure out
+ # how to handle using a combination of Agg for generating
+ # figures and then also supporting displaying them using, e.g.,
+ # TkAgg.
+ from matplotlib import get_backend
+
+ with Pool(num_procs) as pool:
+ evaluations = _evaluate_log_F_ext_using_custom_approach(
+ _map=pool.map if get_backend().lower() == "agg" else map,
+ **evaluation_kwargs,
+ )
+ else:
+ evaluations = _evaluate_log_F_ext_using_custom_approach(
+ _map=map,
+ **evaluation_kwargs,
+ )
+
+ f: Frequencies = data.get_frequencies()
+ Z_exp: ComplexImpedances = data.get_impedances()
+ result_sets: List[Tuple[float, List[KramersKronigResult]]] = []
+
+ if not isinstance(test, str):
+ raise TypeError(f"Expected a string instead of {test=}")
+
+ fits: _KKFits
+ statistic: float
+ for fits, statistic in sorted(evaluations, key=lambda e: e[1]):
+ results: List[KramersKronigResult] = []
+ for circuit, pseudo_chisqr in zip(
+ fits.circuits,
+ fits.pseudo_chisqrs,
+ ):
+ Z_fit: ComplexImpedances = circuit.get_impedances(f)
+ results.append(
+ KramersKronigResult(
+ circuit=circuit,
+ pseudo_chisqr=pseudo_chisqr,
+ frequencies=f,
+ impedances=Z_fit,
+ # Residuals calculated according to eqs. 15 and 16
+ # in Schönleber et al. (2014)
+ residuals=_calculate_residuals(
+ Z_exp=Z_exp,
+ Z_fit=Z_fit,
+ ),
+ test=test,
+ )
+ )
+
+ result_sets.append((fits.log_F_ext, results, statistic))
+
+ return result_sets
+
+
+def _evaluate_representations(
+ representations: List[bool],
+ data: DataSet,
+ test: str = "real",
+ num_RCs: Optional[List[int]] = None,
+ add_capacitance: bool = True,
+ add_inductance: bool = True,
+ min_log_F_ext: float = -1.0,
+ max_log_F_ext: float = 1.0,
+ log_F_ext: float = 0.0,
+ num_F_ext_evaluations: int = 20,
+ rapid_F_ext_evaluations: bool = True,
+ cnls_method: str = "leastsq",
+ max_nfev: int = 0,
+ timeout: int = 60,
+ num_procs: int = -1,
+) -> List[Tuple[float, List[KramersKronigResult], float]]:
+ evaluations: List[Tuple[float, List[KramersKronigResult], float]] = []
+
+ admittance: bool
+ for admittance in representations:
+ evaluations.append(
+ evaluate_log_F_ext(
+ data=data,
+ test=test,
+ num_RCs=num_RCs,
+ add_capacitance=add_capacitance,
+ add_inductance=add_inductance,
+ admittance=admittance,
+ min_log_F_ext=min_log_F_ext,
+ max_log_F_ext=max_log_F_ext,
+ log_F_ext=log_F_ext,
+ num_F_ext_evaluations=num_F_ext_evaluations,
+ rapid_F_ext_evaluations=rapid_F_ext_evaluations,
+ cnls_method=cnls_method,
+ max_nfev=max_nfev,
+ timeout=timeout,
+ num_procs=num_procs,
+ )[0]
+ )
+
+ return evaluations
+
+
+def perform_exploratory_kramers_kronig_tests(
+ data: DataSet,
+ test: str = "real",
+ num_RCs: Optional[List[int]] = None,
+ add_capacitance: bool = True,
+ add_inductance: bool = True,
+ admittance: Optional[bool] = None,
+ min_log_F_ext: float = -1.0,
+ max_log_F_ext: float = 1.0,
+ log_F_ext: float = 0.0,
+ num_F_ext_evaluations: int = 20,
+ rapid_F_ext_evaluations: bool = True,
+ cnls_method: str = "leastsq",
+ max_nfev: int = 0,
+ timeout: int = 60,
+ num_procs: int = -1,
+ **kwargs,
+) -> Tuple[
+ List[KramersKronigResult],
+ Tuple[KramersKronigResult, Dict[int, float], int, int],
+]:
+ """
+ Similar to |perform_kramers_kronig_test| but returns some intermediate results rather than only the final |KramersKronigResult|.
+ This function acts as a wrapper for |evaluate_log_F_ext|, |suggest_num_RC|, and |suggest_representation|.
+
+ References:
+
+ - B.A. Boukamp, 1995, J. Electrochem. Soc., 142, 1885-1894 (https://doi.org/10.1149/1.2044210)
+ - V. Yrjänä and J. Bobacka, 2024, Electrochim. Acta, 504, 144951 (https://doi.org/10.1016/j.electacta.2024.144951)
+
+ Parameters
+ ----------
+ data: DataSet
+ The data set to be tested.
+
+ test: str, optional
+ See |perform_kramers_kronig_test| for details.
+
+ num_RCs: Optional[List[int]], optional
+ A list of integers of the various numbers of RC elements to test.
+ If a None value is provided (i.e., the default), then the range of values to test is determined automatically.
+
+ add_capacitance: bool, optional
+ See |perform_kramers_kronig_test| for details.
+
+ add_inductance: bool, optional
+ See |perform_kramers_kronig_test| for details.
+
+ admittance: bool, optional
+ See |perform_kramers_kronig_test| for details.
+
+ min_log_F_ext: float, optional
+ See |perform_kramers_kronig_test| for details.
+
+ max_log_F_ext: float, optional
+ See |perform_kramers_kronig_test| for details.
+
+ log_F_ext: float, optional
+ See |perform_kramers_kronig_test| for details.
+
+ num_F_ext_evaluations: int, optional
+ See |perform_kramers_kronig_test| for details.
+
+ rapid_F_ext_evaluations: bool, optional
+ See |perform_kramers_kronig_test| for details.
+
+ cnls_method: str, optional
+ See |perform_kramers_kronig_test| for details.
+
+ max_nfev: int, optional
+ See |perform_kramers_kronig_test| for details.
+
+ timeout: int, optional
+ See |perform_kramers_kronig_test| for details.
+
+ num_procs: int, optional
+ See |perform_kramers_kronig_test| for details.
+
+ **kwargs
+ See |perform_kramers_kronig_test| for details.
+
+ Returns
+ -------
+ Tuple[List[KramersKronigResult], Tuple[KramersKronigResult, Dict[int, float], int, int]]:
+ A tuple containing a list of |KramersKronigResult| and the corresponding result of |suggest_num_RC|.
+ """
+ if not (_is_boolean(admittance) or admittance is None):
+ raise TypeError(f"Expected a boolean or None instead of {admittance=}")
+
+ evaluations: List[Tuple[float, List[KramersKronigResult], float]]
+ evaluations = _evaluate_representations(
+ representations=[False, True] if admittance is None else [admittance],
+ data=data,
+ test=test,
+ num_RCs=num_RCs,
+ add_capacitance=add_capacitance,
+ add_inductance=add_inductance,
+ min_log_F_ext=min_log_F_ext,
+ max_log_F_ext=max_log_F_ext,
+ log_F_ext=log_F_ext,
+ num_F_ext_evaluations=num_F_ext_evaluations,
+ rapid_F_ext_evaluations=rapid_F_ext_evaluations,
+ cnls_method=cnls_method,
+ max_nfev=max_nfev,
+ timeout=timeout,
+ num_procs=num_procs,
+ )
+ assert len(evaluations) > 0, evaluations
+
+ suggestions: List[Tuple[KramersKronigResult, Dict[int, float], int, int]] = []
+
+ tests: List[KramersKronigResult] = []
+ for _, tests, *_ in evaluations:
+ suggestions.append(suggest_num_RC(tests, **kwargs))
+
+ if not (len(suggestions) == len(evaluations) > 1):
+ raise ValueError(f"Expected {len(suggestions)=} == {len(evaluations)=} > 1")
+
+ suggestion = suggest_representation(suggestions)
+
+ for _, tests, *_ in evaluations:
+ if tests[0].admittance == suggestion[0].admittance:
+ break
+ else:
+ raise ValueError(f"Expected {tests[0].admittance=} == {suggestion[0].admittance=}")
+
+ return (tests, suggestion)
diff --git a/src/pyimpspec/analysis/kramers_kronig/least_squares.py b/src/pyimpspec/analysis/kramers_kronig/least_squares.py
new file mode 100644
index 0000000..c84b07b
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/least_squares.py
@@ -0,0 +1,578 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from typing import (
+ List,
+ Tuple,
+)
+from numpy import (
+ complex128,
+ float64,
+ inf,
+ pi,
+ sum as array_sum,
+ zeros,
+)
+from numpy.linalg import lstsq
+from numpy.typing import NDArray
+from pyimpspec.circuit.base import Element
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.circuit.elements import (
+ Capacitor,
+ Inductor,
+ KramersKronigAdmittanceRC,
+ KramersKronigRC,
+ Resistor,
+)
+from pyimpspec.exceptions import KramersKronigError
+from pyimpspec.typing import (
+ ComplexImpedances,
+ Frequencies,
+)
+from pyimpspec.typing.helpers import (
+ _is_boolean,
+ _is_complex_array,
+ _is_floating,
+ _is_floating_array,
+ _is_integer,
+)
+from .utility import (
+ _generate_circuit,
+ _generate_time_constants,
+)
+
+
+def _initialize_A_matrix(
+ test: str,
+ w: NDArray[float64],
+ taus: NDArray[float64],
+ add_capacitance: bool,
+ add_inductance: bool,
+) -> NDArray[float64]:
+ m: int = len(w) * (2 if test == "complex" else 1)
+ n: int = len(taus) + 1
+
+ if add_capacitance:
+ n += 1
+
+ if add_inductance:
+ n += 1
+
+ return zeros((m, n), dtype=float64)
+
+
+def _add_resistance_to_A_matrix(
+ A: NDArray[float64],
+ test: str,
+):
+ m: int = A.shape[0]
+ i: int = 0
+
+ if test == "complex":
+ A[0:m // 2, i] = 1
+ elif test == "real":
+ A[0:m, i] = 1
+
+
+def _calculate_kth_A_matrix_variables(
+ w: NDArray,
+ tau: float64,
+ admittance: bool,
+) -> NDArray[complex128]:
+ if admittance:
+ return w / (w * tau - 1j)
+ else:
+ return 1 / (1 + 1j * w * tau)
+
+
+def _add_kth_variables_to_A_matrix(
+ A: NDArray[float64],
+ test: str,
+ w: NDArray[float64],
+ tau: float64,
+ i: int,
+ admittance: bool,
+):
+ c: NDArray[complex128] = _calculate_kth_A_matrix_variables(
+ w=w,
+ tau=tau,
+ admittance=admittance,
+ )
+
+ m: int = A.shape[0]
+ if test == "complex":
+ A[0:m // 2, i] = c.real
+ A[m // 2:, i] = c.imag
+ elif test == "real":
+ A[0:m, i] = c.real
+ else:
+ A[0:m, i] = c.imag
+
+
+def _add_capacitance_to_A_matrix(
+ A: NDArray[float64],
+ test: str,
+ w: NDArray[float64],
+ i: int,
+ admittance: bool,
+):
+ m: int = A.shape[0]
+
+ if test == "complex":
+ A[m // 2:, i] = w if admittance else (-1 / w)
+ elif test == "imaginary":
+ A[:, i] = w if admittance else (-1 / w)
+
+
+def _add_inductance_to_A_matrix(
+ A: NDArray[float64],
+ test: str,
+ w: NDArray[float64],
+ i: int,
+ admittance: bool,
+):
+ m: int = A.shape[0]
+
+ if test == "complex":
+ A[m // 2:, i] = (1 / w) if admittance else w
+ elif test == "imaginary":
+ A[:, i] = (1 / w) if admittance else w
+
+
+def _generate_A_matrix(
+ test: str,
+ w: NDArray[float64],
+ taus: NDArray[float64],
+ add_capacitance: bool,
+ add_inductance: bool,
+ admittance: bool,
+) -> NDArray[float64]:
+ A: NDArray[float64] = _initialize_A_matrix(
+ test,
+ w,
+ taus,
+ add_capacitance,
+ add_inductance,
+ )
+
+ # Series or parallel R
+ _add_resistance_to_A_matrix(A=A, test=test)
+
+ # R_k or C_k
+ i: int
+ tau: float64
+ for i, tau in enumerate(taus, start=1):
+ _add_kth_variables_to_A_matrix(
+ A=A,
+ test=test,
+ w=w,
+ tau=tau,
+ i=i,
+ admittance=admittance,
+ )
+
+ # Series or parallel C
+ if add_capacitance:
+ i += 1
+ _add_capacitance_to_A_matrix(
+ A=A,
+ test=test,
+ w=w,
+ i=i,
+ admittance=admittance,
+ )
+
+ # Series or parallel L
+ if add_inductance:
+ i += 1
+ _add_inductance_to_A_matrix(
+ A=A,
+ test=test,
+ w=w,
+ i=i,
+ admittance=admittance,
+ )
+
+ return A
+
+
+def _initialize_b_vector(test: str, Z_exp: ComplexImpedances) -> NDArray[float64]:
+ m: int = len(Z_exp) * (2 if test == "complex" else 1)
+
+ return zeros(m, dtype=float64)
+
+
+def _add_values_to_b_vector(
+ b: NDArray[float64],
+ test: str,
+ Z_exp: ComplexImpedances,
+ admittance: bool,
+):
+ m: int = b.shape[0]
+
+ if test == "complex":
+ b[0:m // 2] = (Z_exp ** (-1 if admittance else 1)).real
+ b[m // 2:] = (Z_exp ** (-1 if admittance else 1)).imag
+ elif test == "real":
+ b[0:m] = (Z_exp ** (-1 if admittance else 1)).real
+ else:
+ b[0:m] = (Z_exp ** (-1 if admittance else 1)).imag
+
+
+def _generate_b_vector(
+ test: str,
+ Z_exp: ComplexImpedances,
+ admittance: bool,
+) -> NDArray[float64]:
+ b: NDArray[float64] = _initialize_b_vector(test, Z_exp)
+ _add_values_to_b_vector(b, test, Z_exp, admittance)
+
+ return b
+
+
+def _update_circuit(
+ circuit: Circuit,
+ variables: NDArray[float64],
+ add_capacitance: bool,
+ add_inductance: bool,
+ admittance: bool,
+):
+ elements: List[Element] = circuit.get_elements(recursive=True)
+ if len(elements) != len(variables):
+ raise ValueError(f"Expected the circuit to contain as many elements ({len(elements)=}) as there are variables ({len(variables)=})")
+
+ # Series or parallel R
+ R: float64
+ R, variables = variables[0], variables[1:]
+ for element in elements:
+ if isinstance(element, Resistor):
+ if admittance:
+ if R == 0.0:
+ R = inf
+ else:
+ R = 1 / R
+ element.set_values(R=R)
+ break
+ else:
+ raise KramersKronigError("Failed to update series/parallel R!")
+
+ # Series or parallel L
+ if add_inductance:
+ L: float64
+ L, variables = variables[-1], variables[:-1]
+ for element in elements:
+ if isinstance(element, Inductor):
+ element.set_values(L=(-1 / L) if admittance else L)
+ element.set_lower_limits(L=-inf)
+ element.set_upper_limits(L=inf)
+ break
+ else:
+ raise KramersKronigError("Failed to update series/parallel L!")
+
+ # Series or parallel C
+ if add_capacitance:
+ C: float64
+ C, variables = variables[-1], variables[:-1]
+ for element in elements:
+ if isinstance(element, Capacitor):
+ element.set_values(C=C if admittance else 1 / C)
+ element.set_lower_limits(C=-inf)
+ element.set_upper_limits(C=inf)
+ break
+ else:
+ raise KramersKronigError("Failed to update series/parallel C!")
+
+ # Fitted R_k or C_k
+ for i, element in enumerate(
+ filter(
+ lambda e: isinstance(
+ e,
+ KramersKronigAdmittanceRC if admittance else KramersKronigRC,
+ ),
+ elements,
+ )
+ ):
+ if admittance:
+ element.set_values(C=variables[i])
+ else:
+ element.set_values(R=variables[i])
+
+
+def _real_test(
+ Z_exp: ComplexImpedances,
+ f: Frequencies,
+ w: NDArray[float64],
+ taus: NDArray[float64],
+ add_capacitance,
+ add_inductance,
+ admittance,
+) -> Circuit:
+ test: str = "real"
+ A: NDArray[float64] = _generate_A_matrix(
+ test,
+ w,
+ taus,
+ add_capacitance=False,
+ add_inductance=False,
+ admittance=admittance,
+ )
+ b: NDArray[float64] = _generate_b_vector(test, Z_exp, admittance)
+ x: NDArray[float64] = lstsq(A, b, rcond=None)[0]
+
+ circuit: Circuit = _generate_circuit(
+ taus,
+ add_capacitance=False,
+ add_inductance=False,
+ admittance=admittance,
+ )
+ _update_circuit(
+ circuit,
+ x,
+ add_capacitance=False,
+ add_inductance=False,
+ admittance=admittance,
+ )
+
+ if add_capacitance or add_inductance:
+ for element in circuit.get_elements(recursive=True):
+ if isinstance(element, Resistor):
+ break
+ else:
+ raise ValueError("Failed to find series/parallel resistance!")
+
+ for connection in circuit.get_connections(recursive=True):
+ if connection.contains(element, top_level=True):
+ break
+ else:
+ raise ValueError(
+ "Failed to find series/parallel connection containing the resistance!"
+ )
+
+ A = zeros((w.size, int(add_capacitance) + int(add_inductance)), dtype=float64)
+ if add_capacitance:
+ A[:, 0] = w if admittance else (-1 / w)
+
+ if add_inductance:
+ A[:, -1] = (1 / w) if admittance else w
+
+ b = _generate_b_vector(
+ "imaginary",
+ (
+ Z_exp ** (-1 if admittance else 1)
+ - circuit.get_impedances(f) ** (-1 if admittance else 1)
+ )
+ ** (-1 if admittance else 1),
+ admittance,
+ )
+
+ corrections: NDArray[float64] = lstsq(A, b, rcond=None)[0]
+
+ i: int = len(x) - 1
+ tmp: NDArray[float64] = x
+ x = zeros(len(x) + int(add_capacitance) + int(add_inductance), dtype=float64)
+ x[: len(tmp)] = tmp
+
+ if add_capacitance:
+ i += 1
+ x[i] = corrections[0]
+ connection.append(Capacitor())
+
+ if add_inductance:
+ i += 1
+ x[i] = corrections[-1]
+ connection.append(Inductor())
+
+ _update_circuit(
+ circuit,
+ x,
+ add_capacitance=add_capacitance,
+ add_inductance=add_inductance,
+ admittance=admittance,
+ )
+
+ return circuit
+
+
+def _imaginary_test(
+ Z_exp: ComplexImpedances,
+ f: Frequencies,
+ w: NDArray[float64],
+ taus: NDArray[float64],
+ weight: NDArray[float64],
+ add_capacitance,
+ add_inductance,
+ admittance,
+) -> Circuit:
+ test: str = "imaginary"
+ A: NDArray[float64] = _generate_A_matrix(
+ test,
+ w,
+ taus,
+ add_capacitance,
+ add_inductance,
+ admittance,
+ )
+ b: NDArray[float64] = _generate_b_vector(test, Z_exp, admittance)
+ x: NDArray[float64] = lstsq(A, b, rcond=None)[0]
+
+ circuit: Circuit = _generate_circuit(
+ taus,
+ add_capacitance,
+ add_inductance,
+ admittance,
+ )
+ _update_circuit(circuit, x, add_capacitance, add_inductance, admittance)
+
+ X_exp: NDArray[complex128] = Z_exp ** (-1 if admittance else 1)
+ X_fit: NDArray[complex128] = circuit.get_impedances(f) ** (-1 if admittance else 1)
+ x[0] = array_sum(weight * (X_exp.real - X_fit.real)) / array_sum(weight)
+ _update_circuit(circuit, x, add_capacitance, add_inductance, admittance)
+
+ return circuit
+
+
+def _complex_test(
+ Z_exp: ComplexImpedances,
+ f: Frequencies,
+ w: NDArray[float64],
+ taus: NDArray[float64],
+ add_capacitance,
+ add_inductance,
+ admittance,
+) -> Circuit:
+ test: str = "complex"
+ A: NDArray[float64] = _generate_A_matrix(
+ test,
+ w,
+ taus,
+ add_capacitance,
+ add_inductance,
+ admittance,
+ )
+ b: NDArray[float64] = _generate_b_vector(test, Z_exp, admittance)
+ x: NDArray[float64] = lstsq(A, b, rcond=None)[0]
+
+ circuit: Circuit = _generate_circuit(
+ taus,
+ add_capacitance,
+ add_inductance,
+ admittance,
+ )
+ _update_circuit(circuit, x, add_capacitance, add_inductance, admittance)
+
+ return circuit
+
+
+def _test_wrapper(args: tuple) -> Tuple[int, Circuit]:
+ test: str
+ f: Frequencies
+ Z_exp: ComplexImpedances
+ weight: NDArray[float64]
+ num_RC: int
+ add_capacitance: bool
+ add_inductance: bool
+ admittance: bool
+ log_F_ext: float
+ (
+ test,
+ f,
+ Z_exp,
+ weight,
+ num_RC,
+ add_capacitance,
+ add_inductance,
+ admittance,
+ log_F_ext,
+ ) = args
+
+ if not isinstance(test, str):
+ raise TypeError(f"Expected a string instead of {test=}")
+ elif test not in ("complex", "real", "imaginary"):
+ raise ValueError(
+ f"Expected 'complex', 'real', or 'imaginary' instead of {test=}"
+ )
+
+ if not _is_floating_array(f):
+ raise TypeError(f"Expected an array of floats instead of {f=}")
+
+ if not _is_complex_array(Z_exp):
+ raise TypeError(f"Expected an array of complex values instead of {Z_exp=}")
+
+ if not _is_floating_array(weight):
+ raise TypeError(f"Expected an array of floats instead of {weight=}")
+
+ if not _is_integer(num_RC):
+ raise TypeError(f"Expected an integer instead of {num_RC=}")
+
+ if not _is_boolean(add_capacitance):
+ raise TypeError(f"Expected a boolean instead of {add_capacitance=}")
+
+ if not _is_boolean(add_inductance):
+ raise TypeError(f"Expected a boolean instead of {add_inductance=}")
+
+ if not _is_boolean(admittance):
+ raise TypeError(f"Expected a boolean instead of {admittance=}")
+
+ if not _is_floating(log_F_ext):
+ raise TypeError(f"Expected a float instead of {log_F_ext=}")
+
+ w: NDArray[float64] = 2 * pi * f
+ taus: NDArray[float64] = _generate_time_constants(w, num_RC, log_F_ext)
+ circuit: Circuit
+ if test == "real":
+ circuit = _real_test(
+ Z_exp=Z_exp,
+ f=f,
+ w=w,
+ taus=taus,
+ add_capacitance=add_capacitance,
+ add_inductance=add_inductance,
+ admittance=admittance,
+ )
+
+ elif test == "imaginary":
+ circuit = _imaginary_test(
+ Z_exp=Z_exp,
+ f=f,
+ w=w,
+ taus=taus,
+ add_capacitance=add_capacitance,
+ add_inductance=add_inductance,
+ admittance=admittance,
+ weight=weight,
+ )
+
+ elif test == "complex":
+ circuit = _complex_test(
+ Z_exp=Z_exp,
+ f=f,
+ w=w,
+ taus=taus,
+ add_capacitance=add_capacitance,
+ add_inductance=add_inductance,
+ admittance=admittance,
+ )
+
+ else:
+ raise ValueError(f"Unsupported test type ({test=})")
+
+ return (
+ num_RC,
+ circuit,
+ )
diff --git a/src/pyimpspec/analysis/kramers_kronig/matrix_inversion.py b/src/pyimpspec/analysis/kramers_kronig/matrix_inversion.py
new file mode 100644
index 0000000..197e28f
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/matrix_inversion.py
@@ -0,0 +1,497 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from typing import Tuple
+from numpy import (
+ abs,
+ complex128,
+ float64,
+ inf,
+ pi,
+ sum as array_sum,
+ zeros,
+)
+from numpy.linalg import (
+ inv,
+ pinv,
+)
+from numpy.typing import NDArray
+from pyimpspec.circuit.base import Element
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.circuit.elements import (
+ Capacitor,
+ Inductor,
+ KramersKronigAdmittanceRC,
+ KramersKronigRC,
+ Resistor,
+)
+from pyimpspec.exceptions import KramersKronigError
+from pyimpspec.typing import (
+ ComplexImpedances,
+ Frequencies,
+ Impedances,
+)
+from pyimpspec.typing.helpers import (
+ List,
+ _is_boolean,
+ _is_complex_array,
+ _is_floating,
+ _is_floating_array,
+ _is_integer,
+)
+from .utility import (
+ _generate_circuit,
+ _generate_time_constants,
+)
+
+
+def _update_circuit(
+ circuit: Circuit,
+ variables: NDArray[float64],
+ add_capacitance: bool,
+ admittance: bool,
+):
+ elements: List[Element] = circuit.get_elements(recursive=True)
+ if len(elements) != len(variables):
+ raise ValueError(
+ f"Expected the circuit to contain as many elements ({len(elements)=}) as there are variables ({len(variables)=})"
+ )
+
+ # Series or parallel R
+ R: float64
+ R, variables = variables[0], variables[1:]
+ if admittance:
+ if R == 0.0:
+ R = 1e18
+ else:
+ R = 1 / R
+
+ for element in elements:
+ if isinstance(element, Resistor):
+ element.set_values(R=R)
+ break
+ else:
+ raise KramersKronigError("Failed to update series/parallel R!")
+
+ # Series or parallel L
+ L: float64
+ L, variables = variables[-1], variables[:-1]
+ if admittance:
+ if L == 0.0:
+ L = 1e18
+ else:
+ L = 1 / L
+
+ L *= -1
+
+ if add_capacitance:
+ C: float64
+ C, variables = variables[-1], variables[:-1]
+ if C == 0.0:
+ C = 1e-50
+
+ if not admittance:
+ C = 1 / C
+
+ for element in elements:
+ if isinstance(element, Inductor):
+ element.set_values(L=L)
+ element.set_lower_limits(L=-inf)
+ element.set_upper_limits(L=inf)
+ break
+ else:
+ raise KramersKronigError("Failed to update series/parallel L!")
+
+ # Series or parallel C
+ if add_capacitance:
+ for element in elements:
+ if isinstance(element, Capacitor):
+ element.set_values(C=C)
+ element.set_lower_limits(C=-inf)
+ element.set_upper_limits(C=inf)
+ break
+ else:
+ raise KramersKronigError("Failed to update series/parallel C!")
+
+ # Fitted R_k or C_k
+ for i, element in enumerate(
+ filter(
+ lambda e: isinstance(
+ e,
+ KramersKronigAdmittanceRC if admittance else KramersKronigRC,
+ ),
+ elements,
+ )
+ ):
+ if admittance:
+ element.set_values(C=variables[i])
+ else:
+ element.set_values(R=variables[i])
+
+
+def _initialize_A_matrices(
+ w: NDArray[float64],
+ taus: NDArray[float64],
+ add_capacitance: bool,
+) -> Tuple[NDArray[float64], NDArray[float64]]:
+ # Generate matrices with the following columns
+ # (top to bottom is left to right)
+ # - R0, resistance
+ # - Ri or Ci associated with taus[i - 1] (0 < i <= num_RC)
+ # - C, optional capacitance
+ # - L, inductance
+ shape: Tuple[int, int] = (w.size, len(taus) + (3 if add_capacitance else 2))
+
+ A_re: NDArray[float64] = zeros(shape, dtype=float64)
+ A_im: NDArray[float64] = zeros(shape, dtype=float64)
+
+ return (A_re, A_im)
+
+
+def _add_resistance_to_A_matrix(A_re: NDArray[float64]):
+ # See Fig. 1 (impedance) and 13 (admittance) in Boukamp (1995)
+ A_re[:, 0] = 1
+
+
+def _add_capacitance_to_A_matrix(
+ A_im: NDArray[float64],
+ w: NDArray[float64],
+ admittance: bool,
+):
+ if admittance:
+ A_im[:, -2] = w
+ else:
+ A_im[:, -2] = -1 / w
+
+
+def _add_inductance_to_A_matrix(
+ A_im: NDArray[float64],
+ w: NDArray[float64],
+ admittance: bool,
+):
+ if admittance:
+ A_im[:, -1] = 1 / w
+ else:
+ A_im[:, -1] = w
+
+
+def _add_kth_variables_to_A_matrices(
+ A_re: NDArray[float64],
+ A_im: NDArray[float64],
+ w: NDArray[float64],
+ taus: NDArray[float64],
+ admittance: bool,
+):
+ i: int
+ tau: float64
+
+ if admittance:
+ for i, tau in enumerate(taus):
+ A_re[:, i + 1] = w**2 * tau / (1 + (w * tau) ** 2)
+ A_im[:, i + 1] = w / (1 + (w * tau) ** 2)
+ else:
+ for i, tau in enumerate(taus):
+ k: NDArray[complex128] = 1 / (1 + 1j * w * tau)
+ A_re[:, i + 1] = k.real
+ A_im[:, i + 1] = k.imag
+
+
+def _scale_A_matrices(
+ A_re: NDArray[float64],
+ A_im: NDArray[float64],
+ abs_X_exp: Impedances,
+):
+ i: int
+ for i in range(A_re.shape[1]):
+ A_re[:, i] /= abs_X_exp
+ A_im[:, i] /= abs_X_exp
+
+
+def _generate_A_matrices(
+ w: NDArray[float64],
+ taus: NDArray[float64],
+ add_capacitance: bool,
+ admittance: bool,
+ abs_X_exp: Impedances,
+) -> Tuple[NDArray[float64], NDArray[float64]]:
+ A_re: NDArray[float64]
+ A_im: NDArray[float64]
+ A_re, A_im = _initialize_A_matrices(
+ w=w,
+ taus=taus,
+ add_capacitance=add_capacitance,
+ )
+
+ _add_resistance_to_A_matrix(A_re)
+
+ if add_capacitance:
+ _add_capacitance_to_A_matrix(
+ A_im=A_im,
+ w=w,
+ admittance=admittance,
+ )
+
+ _add_inductance_to_A_matrix(
+ A_im=A_im,
+ w=w,
+ admittance=admittance,
+ )
+
+ _add_kth_variables_to_A_matrices(
+ A_re=A_re,
+ A_im=A_im,
+ w=w,
+ taus=taus,
+ admittance=admittance,
+ )
+
+ _scale_A_matrices(
+ A_re=A_re,
+ A_im=A_im,
+ abs_X_exp=abs_X_exp,
+ )
+
+ return (
+ A_re,
+ A_im,
+ )
+
+
+def _real_test(
+ A_re: NDArray[float64],
+ X_exp: NDArray[complex128],
+ w: NDArray[float64],
+ f: Frequencies,
+ taus: NDArray[float64],
+ add_capacitance: bool,
+ admittance: bool,
+ circuit: Circuit,
+) -> NDArray[float64]:
+ abs_X_exp: NDArray[float64] = abs(X_exp)
+
+ # Fit using the real part
+ variables: NDArray[float64] = pinv(A_re).dot(X_exp.real / abs_X_exp)
+ if add_capacitance:
+ # Nullifies the capacitance without dividing by 0
+ variables[-2] = 1e-18
+
+ # Fit using the imaginary part to fix the series/parallel
+ # inductance (and capacitance)
+ A_im: NDArray[float64] = zeros((w.size, 2), dtype=float64)
+ A_im[:, -1] = (1 / w) if admittance else w
+ if add_capacitance:
+ A_im[:, -2] = w if admittance else (-1 / w)
+
+ # Scaling
+ for i in range(A_im.shape[1]):
+ A_im[:, i] /= abs_X_exp
+
+ # Update the circuit and calculate the impedance or admittance
+ _update_circuit(
+ circuit=circuit,
+ variables=variables,
+ add_capacitance=add_capacitance,
+ admittance=admittance,
+ )
+ X_fit: NDArray[complex128] = circuit.get_impedances(f) ** (-1 if admittance else 1)
+
+ # Extract the corrected series/parallel inductance (and capacitance)
+ coefs: NDArray[float64] = pinv(A_im).dot((X_exp.imag - X_fit.imag) / abs_X_exp)
+ if add_capacitance:
+ variables[-2:] = coefs
+ else:
+ variables[-1] = coefs[-1]
+
+ return variables
+
+
+def _imaginary_test(
+ A_im: NDArray[float64],
+ X_exp: NDArray[complex128],
+ f: Frequencies,
+ taus: NDArray[float64],
+ add_capacitance: bool,
+ admittance: bool,
+ weight: NDArray[float64],
+ circuit: Circuit,
+) -> NDArray[float64]:
+ abs_X_exp: NDArray[float64] = abs(X_exp)
+
+ # Fit using the imaginary part
+ variables: NDArray[float64] = pinv(A_im).dot(X_exp.imag / abs_X_exp)
+
+ # Update the circuit and calculate the impedance or admittance
+ _update_circuit(
+ circuit=circuit,
+ variables=variables,
+ add_capacitance=add_capacitance,
+ admittance=admittance,
+ )
+ X_fit: NDArray[complex128] = circuit.get_impedances(f) ** (-1 if admittance else 1)
+
+ # Estimate the series or parallel resistance
+ variables[0] = array_sum(weight * (X_exp.real - X_fit.real)) / array_sum(weight)
+
+ return variables
+
+
+def _complex_test(
+ A_re: NDArray[float64],
+ A_im: NDArray[float64],
+ X_exp: NDArray[complex128],
+ taus: NDArray[float64],
+ add_capacitance: bool,
+ admittance: bool,
+ circuit: Circuit,
+) -> NDArray[float64]:
+ abs_X_exp: NDArray[float64] = abs(X_exp)
+
+ # Fit using the complex impedance
+ x: NDArray[float64] = inv(A_re.T.dot(A_re) + A_im.T.dot(A_im))
+
+ y: NDArray[float64] = A_re.T.dot(X_exp.real / abs_X_exp) + A_im.T.dot(
+ X_exp.imag / abs_X_exp
+ )
+
+ variables: NDArray[float64] = x.dot(y)
+
+ return variables
+
+
+def _test_wrapper(args: tuple) -> Tuple[int, Circuit]:
+ test: str
+ f: Frequencies
+ Z_exp: ComplexImpedances
+ weight: NDArray[float64]
+ num_RC: int
+ add_capacitance: bool
+ admittance: bool
+ log_F_ext: float
+ (
+ test,
+ f,
+ Z_exp,
+ weight,
+ num_RC,
+ add_capacitance,
+ admittance,
+ log_F_ext,
+ ) = args
+
+ if not isinstance(test, str):
+ raise TypeError(f"Expected a string instead of {test=}")
+ elif test not in ("complex", "real", "imaginary"):
+ raise ValueError(
+ f"Expected 'complex', 'real', or 'imaginary' instead of {test=}"
+ )
+
+ if not _is_floating_array(f):
+ raise TypeError(f"Expected an array of floats instead of {f=}")
+
+ if not _is_complex_array(Z_exp):
+ raise TypeError(f"Expected an array of complex values instead of {Z_exp=}")
+
+ if not _is_floating_array(weight):
+ raise TypeError(f"Expected an array of floats instead of {weight=}")
+
+ if not _is_integer(num_RC):
+ raise TypeError(f"Expected an integer instead of {num_RC=}")
+
+ if not _is_boolean(add_capacitance):
+ raise TypeError(f"Expected a boolean instead of {add_capacitance=}")
+
+ if not _is_boolean(admittance):
+ raise TypeError(f"Expected a boolean instead of {admittance=}")
+
+ if not _is_floating(log_F_ext):
+ raise TypeError(f"Expected a float instead of {log_F_ext=}")
+
+ X_exp: NDArray[complex128] = Z_exp ** (-1 if admittance else 1)
+ w: NDArray[float64] = 2 * pi * f
+ taus: NDArray[float64] = _generate_time_constants(w, num_RC, log_F_ext)
+
+ A_re: NDArray[float64]
+ A_im: NDArray[float64]
+ A_re, A_im = _generate_A_matrices(
+ w,
+ taus,
+ add_capacitance,
+ admittance,
+ abs(X_exp),
+ )
+
+ circuit: Circuit = _generate_circuit(
+ taus,
+ add_capacitance,
+ True,
+ admittance,
+ )
+
+ # Solve the set of linear equations and update the circuit's parameters
+ variables: NDArray[float64]
+ if test == "real":
+ variables = _real_test(
+ A_re,
+ X_exp,
+ w,
+ f,
+ taus,
+ add_capacitance,
+ admittance,
+ circuit,
+ )
+
+ elif test == "imaginary":
+ variables = _imaginary_test(
+ A_im,
+ X_exp,
+ f,
+ taus,
+ add_capacitance,
+ admittance,
+ weight,
+ circuit,
+ )
+
+ elif test == "complex":
+ variables = _complex_test(
+ A_re,
+ A_im,
+ X_exp,
+ taus,
+ add_capacitance,
+ admittance,
+ circuit,
+ )
+
+ else:
+ raise ValueError(f"Unsupported test type {test=}")
+
+ # Update the circuit
+ _update_circuit(
+ circuit=circuit,
+ variables=variables,
+ add_capacitance=add_capacitance,
+ admittance=admittance,
+ )
+
+ return (
+ num_RC,
+ circuit,
+ )
diff --git a/src/pyimpspec/analysis/kramers_kronig/result.py b/src/pyimpspec/analysis/kramers_kronig/result.py
new file mode 100644
index 0000000..8fa176e
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/result.py
@@ -0,0 +1,808 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from dataclasses import dataclass
+from functools import cached_property
+from numpy import (
+ abs,
+ angle,
+ array,
+ float64,
+ isclose,
+ isnan,
+ log10 as log,
+ logical_and,
+ mean,
+ nan,
+ pi,
+ std,
+ sum as array_sum,
+)
+from numpy.typing import NDArray
+from pyimpspec.analysis.utility import _interpolate
+from pyimpspec.analysis.kramers_kronig.utility import (
+ _estimate_pct_noise,
+ _format_log_F_ext_for_latex,
+)
+from pyimpspec.circuit.circuit import Circuit
+from pyimpspec.circuit.kramers_kronig import (
+ Element,
+ KramersKronigRC,
+ KramersKronigAdmittanceRC,
+)
+from pyimpspec.circuit.connections import (
+ Parallel,
+ Series,
+)
+from pyimpspec.circuit.elements import (
+ Capacitor,
+ Resistor,
+ Inductor,
+)
+from pyimpspec.typing import (
+ ComplexImpedances,
+ ComplexResiduals,
+ Frequencies,
+ Impedances,
+ Phases,
+ Residuals,
+ TimeConstants,
+)
+from pyimpspec.typing.helpers import (
+ Dict,
+ List,
+ Tuple,
+ Type,
+ Union,
+ _is_integer,
+ _is_floating,
+)
+
+
+@dataclass(frozen=True)
+class KramersKronigResult:
+ """
+ An object representing the results of a linear Kramers-Kronig test applied to a data set.
+
+ Parameters
+ ----------
+ circuit: Circuit
+ The fitted circuit.
+
+ pseudo_chisqr: float
+ The pseudo chi-squared value (|pseudo chi-squared|, eq. 14 in Boukamp, 1995).
+
+ frequencies: |Frequencies|
+ The frequencies used to perform the test.
+
+ impedances: |ComplexImpedances|
+ The impedances produced by the fitted circuit at each of the tested frequencies.
+
+ residuals: |ComplexResiduals|
+ The residuals for the real (eq. 15 in Schönleber et al., 2014) and imaginary (eq. 16 in Schönleber et al., 2014) parts of the fit.
+
+ test: str
+ The type of test (and implementation) that was performed:
+
+ - 'complex'
+ - 'real'
+ - 'imaginary'
+ - 'complex-inv'
+ - 'real-inv'
+ - 'imaginary-inv'
+ - 'cnls'
+ """
+
+ circuit: Circuit
+ pseudo_chisqr: float
+ frequencies: Frequencies
+ impedances: ComplexImpedances
+ residuals: ComplexResiduals
+ test: str
+
+    def __repr__(self) -> str:
+        # Compact debugging summary: the immittance representation that was
+        # tested (Y or Z), the time-constant range extension, the number of RC
+        # elements, and the object's id so distinct results can be told apart.
+        return (
+            "KramersKronigResult ("
+            + ", ".join(
+                (
+                    f"X={'Y' if self.admittance else 'Z'}",
+                    f"log_F_ext={self.log_F_ext:.3f}",
+                    f"num_RC={self.num_RC}",
+                    f"{hex(id(self))}",
+                )
+            )
+            + ")"
+        )
+
+    # Cached conveniences wrapping the corresponding getter methods.
+    # Caching is safe because the dataclass is frozen (immutable).
+    @cached_property
+    def num_RC(self) -> int:
+        # The number of RC elements in the fitted circuit.
+        return self.get_num_RC()
+
+    @cached_property
+    def admittance(self) -> bool:
+        # True if the test was performed on the admittance representation.
+        return self.was_tested_on_admittance()
+
+    @cached_property
+    def label(self) -> str:
+        # A plot-ready label describing this result.
+        return self.get_label()
+
+    @cached_property
+    def time_constants(self) -> TimeConstants:
+        # The sorted time constants used during fitting.
+        return self.get_time_constants()
+
+    @cached_property
+    def log_F_ext(self) -> float:
+        # The logarithm of the time-constant range extension factor.
+        return self.get_log_F_ext()
+
+    @cached_property
+    def series_resistance(self) -> float:
+        # Series resistance value (NaN when not present/applicable).
+        return self.get_series_resistance()
+
+    @cached_property
+    def series_capacitance(self) -> float:
+        # Series capacitance value (NaN when not present/applicable).
+        return self.get_series_capacitance()
+
+    @cached_property
+    def series_inductance(self) -> float:
+        # Series inductance value (NaN when not present/applicable).
+        return self.get_series_inductance()
+
+    @cached_property
+    def parallel_resistance(self) -> float:
+        # Parallel resistance value (NaN when not present/applicable).
+        return self.get_parallel_resistance()
+
+    @cached_property
+    def parallel_capacitance(self) -> float:
+        # Parallel capacitance value (NaN when not present/applicable).
+        return self.get_parallel_capacitance()
+
+    @cached_property
+    def parallel_inductance(self) -> float:
+        # Parallel inductance value (NaN when not present/applicable).
+        return self.get_parallel_inductance()
+
+    def get_num_RC(self) -> int:
+        """
+        Get the number of RC elements in the fitted circuit.
+
+        Returns
+        -------
+        int
+        """
+        # Which element class to count depends on the representation that was
+        # tested: admittance fits use KramersKronigAdmittanceRC elements.
+        Class: Type[Element] = (
+            KramersKronigAdmittanceRC if self.admittance else KramersKronigRC
+        )
+
+        return len(
+            [
+                element
+                for element in self.circuit.get_elements(recursive=True)
+                if isinstance(element, Class)
+            ]
+        )
+
+    def was_tested_on_admittance(self) -> bool:
+        """
+        Determine whether the test was performed on the admittance representation.
+
+        Returns
+        -------
+        bool
+        """
+        # The presence of any KramersKronigAdmittanceRC element implies the
+        # admittance representation (Y) was used; otherwise impedance (Z).
+        element: Element
+        for element in self.circuit.get_elements(recursive=True):
+            if isinstance(element, KramersKronigAdmittanceRC):
+                return True
+
+        return False
+
+    def get_label(self) -> str:
+        """
+        Get the label of this result.
+
+        Returns
+        -------
+        str
+        """
+        # Start with the immittance representation that was tested.
+        label: str = "Y" if self.admittance else "Z"
+        cdc: str = self.circuit.to_string()
+
+        # Note any extra capacitance and/or inductance present in the circuit
+        # description code (", C", ", L", or ", C+L").
+        if "C" in cdc:
+            label += ", C"
+            if "L" in cdc:
+                label += "+L"
+        elif "L" in cdc:
+            label += ", L"
+
+        # Number of time constants (i.e., RC elements) as a LaTeX fragment.
+        label += r", $N_\tau = " + str(self.num_RC) + "$"
+
+        log_F_ext: float = self.log_F_ext
+        if log_F_ext != 0.0:
+            # The range of time constants was extended (or contracted), so the
+            # label also shows the extension factor F_ext.
+            formatted_extension: str = _format_log_F_ext_for_latex(log_F_ext)
+
+            label += (
+                r", $\tau \in "
+                + r"[\frac{1}{f_{\rm max} \times F_{\rm ext}},"
+                + r"\frac{F_{\rm ext}}{f_{\rm min}}],"
+                + r"\log{F_{\rm ext}} = "
+                + formatted_extension
+                + r"$"
+            )
+        else:
+            label += r", $\tau \in [\frac{1}{f_{\rm max}}, \frac{1}{f_{\rm min}}]$"
+
+        return label
+
+ def get_frequencies(self, num_per_decade: int = -1) -> Frequencies:
+ """
+ Get the frequencies in the tested frequency range.
+
+ Parameters
+ ----------
+ num_per_decade: int, optional
+ The number of points per decade.
+ A positive value results in frequencies being calculated within the original frequency range.
+ Otherwise, only the original frequencies are used.
+
+ Returns
+ -------
+ |Frequencies|
+ """
+ if not _is_integer(num_per_decade):
+ raise TypeError(f"Expected an integer instead of {num_per_decade=}")
+
+ if num_per_decade > 0:
+ return _interpolate(self.frequencies, num_per_decade)
+
+ return self.frequencies
+
+ def get_impedances(self, num_per_decade: int = -1) -> ComplexImpedances:
+ """
+ Get the fitted circuit's impedance response within the tested frequency range.
+
+ Parameters
+ ----------
+ num_per_decade: int, optional
+ The number of points per decade.
+ A positive value results in data points being calculated using the fitted circuit within the original frequency range.
+ Otherwise, only the original frequencies are used.
+
+ Returns
+ -------
+ |ComplexImpedances|
+ """
+ if not _is_integer(num_per_decade):
+ raise TypeError(f"Expected an integer instead of {num_per_decade=}")
+
+ if num_per_decade > 0:
+ return self.circuit.get_impedances(self.get_frequencies(num_per_decade))
+
+ return self.impedances
+
+ def get_nyquist_data(
+ self,
+ num_per_decade: int = -1,
+ ) -> Tuple[Impedances, Impedances]:
+ """
+ Get the data necessary to plot this KramersKronigResult as a Nyquist plot: the real and the negative imaginary parts of the impedances.
+
+ Parameters
+ ----------
+ num_per_decade: int, optional
+ The number of points per decade.
+ A positive value results in data points being calculated using the fitted circuit within the original frequency range.
+ Otherwise, only the original frequencies are used.
+
+ Returns
+ -------
+ Tuple[|Impedances|, |Impedances|]
+ """
+ if not _is_integer(num_per_decade):
+ raise TypeError(f"Expected an integer instead of {num_per_decade=}")
+
+ if num_per_decade > 0:
+ Z: ComplexImpedances = self.get_impedances(num_per_decade)
+ return (
+ Z.real,
+ -Z.imag,
+ )
+
+ return (
+ self.impedances.real,
+ -self.impedances.imag,
+ )
+
+ def get_bode_data(
+ self,
+ num_per_decade: int = -1,
+ ) -> Tuple[Frequencies, Impedances, Phases]:
+ """
+ Get the data necessary to plot this KramersKronigResult as a Bode plot: the frequencies, the absolute magnitudes of the impedances, and the negative phase angles/shifts of the impedances in degrees.
+
+ Parameters
+ ----------
+ num_per_decade: int, optional
+ The number of points per decade.
+ A positive value results in data points being calculated using the fitted circuit within the original frequency range.
+ Otherwise, only the original frequencies are used.
+
+ Returns
+ -------
+ Tuple[|Frequencies|, |Impedances|, |Phases|]
+ """
+ if not _is_integer(num_per_decade):
+ raise TypeError(f"Expected an integer instead of {num_per_decade=}")
+
+ if num_per_decade > 0:
+ f: Frequencies = self.get_frequencies(num_per_decade)
+ Z: ComplexImpedances = self.circuit.get_impedances(f)
+ return (
+ f,
+ abs(Z),
+ -angle(Z, deg=True),
+ )
+
+ return (
+ self.frequencies,
+ abs(self.impedances),
+ -angle(self.impedances, deg=True),
+ )
+
+    def get_residuals_data(self) -> Tuple[Frequencies, Residuals, Residuals]:
+        """
+        Get the data necessary to plot the relative residuals for this result: the frequencies, the relative residuals for the real parts of the impedances in percents, and the relative residuals for the imaginary parts of the impedances in percents.
+
+        Returns
+        -------
+        Tuple[|Frequencies|, |Residuals|, |Residuals|]
+        """
+        # The stored residuals are fractions; multiply by 100 for percents.
+        return (
+            self.frequencies,  # type: ignore
+            self.residuals.real * 100,  # type: ignore
+            self.residuals.imag * 100,  # type: ignore
+        )
+
+    def get_time_constants(self) -> TimeConstants:
+        """
+        Get the time constants that were used during fitting.
+
+        Returns
+        -------
+        |TimeConstants|
+        """
+        time_constants: List[float] = []
+
+        # Collect the 'tau' parameter from every Kramers-Kronig RC element
+        # (impedance or admittance variant) in the fitted circuit.
+        element: Element
+        for element in self.circuit.get_elements(recursive=True):
+            if not (
+                isinstance(element, KramersKronigRC)
+                or isinstance(element, KramersKronigAdmittanceRC)
+            ):
+                continue
+
+            time_constants.append(element.get_value(key="tau"))
+
+        # Returned in ascending order.
+        return array(sorted(time_constants))
+
+    def get_log_F_ext(self) -> float:
+        """
+        Get the value of |log F_ext|, which affects the range of time constants.
+
+        Returns
+        -------
+        float
+        """
+        time_constants: TimeConstants = self.time_constants
+        if len(time_constants) < 2:
+            # Too few RC elements to define a range of time constants.
+            return 0.0
+
+        minimum_time_constant: float64 = min(time_constants)
+        maximum_time_constant: float64 = max(time_constants)
+
+        # The unextended limits are tau_min = 1/omega_max and
+        # tau_max = 1/omega_min; the extension at each end is measured
+        # (in decades) relative to those limits.
+        f: Frequencies = self.get_frequencies()
+        low_end_extension: float64 = log(1 / (2 * pi * max(f))) - log(
+            minimum_time_constant
+        )
+        high_end_extension: float64 = log(maximum_time_constant) - log(
+            1 / (2 * pi * min(f))
+        )
+
+        # Both ends are expected to have been extended symmetrically.
+        if not isclose(low_end_extension, high_end_extension):
+            raise ValueError(f"Expected {low_end_extension=} ≃ {high_end_extension=}")
+
+        return float(low_end_extension)
+
+ def perform_lilliefors_test(self) -> Tuple[float, float]:
+ """
+ Perform the Lilliefors test for normality on the residuals of the real and imaginary parts.
+
+ Returns
+ -------
+ Tuple[float, float]
+ The p-values for the tests performed on the residuals of the real and imaginary parts.
+ The null hypothesis is that the residuals come from a normal distribution.
+ """
+ from statsmodels.stats.diagnostic import lilliefors
+
+ real: NDArray[float64]
+ imag: NDArray[float64]
+ _, real, imag = self.get_residuals_data()
+
+ return tuple(
+ map(
+ lambda samples: lilliefors(samples)[1],
+ (real, imag),
+ )
+ )
+
+ def perform_shapiro_wilk_test(self) -> Tuple[float, float]:
+ """
+ Perform the Shapiro-Wilk test for normality on the residuals of the real and imaginary parts.
+
+ Returns
+ -------
+ Tuple[float, float]
+ The p-values for the tests performed on the residuals of the real and imaginary parts.
+ The null hypothesis is that the residuals come from a normal distribution.
+ """
+ from scipy.stats import shapiro
+
+ real: NDArray[float64]
+ imag: NDArray[float64]
+ _, real, imag = self.get_residuals_data()
+
+ return tuple(
+ map(
+ lambda samples: shapiro(samples)[1],
+ (real, imag),
+ )
+ )
+
+    def perform_kolmogorov_smirnov_test(
+        self,
+        standard_deviation: float = 0.0,
+    ) -> Tuple[float, float]:
+        """
+        Perform one-sample Kolmogorov-Smirnov test on the residuals of the real and imaginary parts.
+        The residuals are tested against a normal distribution with a mean that is assumed to be zero and a standard deviation that can either be provided or estimated automatically.
+
+        Parameters
+        ----------
+        standard_deviation: float, optional
+            If greater than zero, then the provided value is used.
+            Otherwise, the standard deviation estimated based on the pseudo chi-squared and the number of frequencies is used.
+
+        Returns
+        -------
+        Tuple[float, float]
+            The p-values for the tests performed on the residuals of the real and imaginary parts.
+            The null hypothesis is that the distributions of the residuals are identical to the normal distribution with a mean of zero and the provided (or estimated) standard deviation.
+        """
+        # Deferred import keeps scipy optional at module import time.
+        from scipy.stats import kstest
+
+        if not _is_floating(standard_deviation):
+            raise TypeError(f"Expected a float instead of {standard_deviation=}")
+        elif standard_deviation <= 0.0:
+            # Fall back to the noise level estimated from pseudo chi-squared.
+            standard_deviation = self.get_estimated_percent_noise()
+
+        real: NDArray[float64]
+        imag: NDArray[float64]
+        _, real, imag = self.get_residuals_data()
+
+        # Test each part against N(mean=0, sd=standard_deviation).
+        return tuple(
+            map(
+                lambda samples: kstest(
+                    rvs=samples,
+                    cdf="norm",
+                    args=(0.0, standard_deviation),
+                ).pvalue,
+                (real, imag),
+            )
+        )
+
+    def _calculate_residuals_statistics(self, level: int) -> Dict[str, float]:
+        """
+        Collect statistics about the residuals of the real and imaginary parts.
+
+        Parameters
+        ----------
+        level: int
+            The amount of statistics to include; higher levels are supersets
+            of the lower levels.
+
+        Returns
+        -------
+        Dict[str, float]
+            A mapping from human-readable labels to statistic values.
+        """
+        # Values are accumulated as [real, imag.] pairs under labels containing
+        # the placeholder '%part%', which is substituted near the end. One key
+        # (the estimated noise level) holds a bare float instead of a list.
+        results: Dict[str, Union[List[float], float]] = {}
+
+        real: NDArray[float64]
+        imag: NDArray[float64]
+        _, real, imag = self.get_residuals_data()
+        order: List[Tuple[NDArray[float64], str]] = [
+            (real, "real"),
+            (imag, "imag."),
+        ]
+
+        # p-values for the normality tests (used at levels >= 2 and >= 3).
+        lilliefors: Tuple[float, float] = self.perform_lilliefors_test()
+        shapiro_wilk: Tuple[float, float] = self.perform_shapiro_wilk_test()
+        kolmogorov_smirnov: Tuple[float, float] = self.perform_kolmogorov_smirnov_test()
+
+        samples: NDArray[float64]
+        if level >= 1:
+            for samples, _ in order:
+                label: str = "Mean of residuals, %part% (% of |Z|)"
+                if label not in results:
+                    results[label] = []
+
+                sample_mean: float64 = mean(samples)
+                results[label].append(sample_mean)
+
+                sample_sd: float64 = std(samples, ddof=1)
+                label = "SD of residuals, %part% (% of |Z|)"
+                if label not in results:
+                    results[label] = []
+
+                results[label].append(sample_sd)
+
+                # Percentage of residuals falling within 1, 2, and 3 sample
+                # standard deviations of the sample mean.
+                i: int
+                for i in range(0, 3):
+                    label = f"Residuals within {i + 1} SD, %part% (%)"
+                    if label not in results:
+                        results[label] = []
+
+                    pct: float = (
+                        array_sum(
+                            logical_and(
+                                samples < sample_mean + (i + 1) * sample_sd,
+                                samples > sample_mean - (i + 1) * sample_sd,
+                            )
+                        )
+                        / len(samples)
+                        * 100
+                    )
+                    results[label].append(pct)
+
+        if level >= 2:
+            p: float
+            for p in lilliefors:
+                label = "Lilliefors test p-value, %part%"
+                if label not in results:
+                    results[label] = []
+                results[label].append(p)
+
+            for p in shapiro_wilk:
+                label = "Shapiro-Wilk test p-value, %part%"
+                if label not in results:
+                    results[label] = []
+                results[label].append(p)
+
+        if level >= 3:
+            for p in kolmogorov_smirnov:
+                label = "One-sample Kolmogorov-Smirnov test p-value, %part%"
+                if label not in results:
+                    # Stored as a bare float (no '%part%' pair) the first time
+                    # around; picked up by the else-branch below.
+                    results["Estimated SD of Gaussian noise (% of |Z|)"] = (
+                        self.get_estimated_percent_noise()
+                    )
+                    results[label] = []
+                results[label].append(p)
+
+        # Substitute '%part%' so each accumulated [real, imag.] pair ends up
+        # under its own label in the flat output mapping.
+        output: Dict[str, float] = {}
+
+        for key in results.keys():
+            if isinstance(results[key], list):
+                value: Union[str, float]
+                for i, value in enumerate(results[key]):
+                    if "%part%" not in key:
+                        raise ValueError(
+                            f"Expected the substring '%part%' to exist in {key=}"
+                        )
+
+                    label = key.replace("%part%", order[i][1])
+                    output[label] = value
+            else:
+                # Bare float entries (e.g., the estimated noise level).
+                value = results[key]
+                output[key] = value
+
+        # Sanity checks on the final mapping.
+        if not all(map(lambda key: isinstance(key, str), output.keys())):
+            raise TypeError(f"Expected only string keys in {output=}")
+
+        if not all(map(lambda value: _is_floating(value), output.values())):
+            raise TypeError(f"Expected only float values in {output=}")
+
+        return output
+
+ def to_statistics_dataframe(
+ self,
+ extended_statistics: int = 3,
+ ) -> "DataFrame": # noqa: F821
+ r"""
+ Get the statistics related to the test as a |DataFrame| object.
+
+ Parameters
+ ----------
+ extended_statistics: int, optional
+ Include different amounts of additional statistics depending on the chosen level.
+ Level 1 includes:
+
+ - The estimated equivalent standard deviation of a Gaussian noise calculated based on the pseudo chi-squared value assuming that the noise in the real and imaginary parts of the impedance are independent, have a Gaussian distribution, a mean of zero, and the same standard deviation.
+ - The means of the real and imaginary residuals.
+ - The sample standard deviations of the real and imaginary residuals.
+ - The percentage of points found within 1, 2, or 3 standard deviations.
+
+ Level 2 includes:
+
+ - The p-values of normality tests performed on the real or imaginary residuals. These tests include: Lilliefors and Shapiro-Wilk.
+
+ Level 3 includes:
+
+ - The p-values for one-sample Kolmogorov-Smirnov tests comparing the real or imaginary residuals against a normal distribution with a mean of zero and a standard deviation (as a percentage of :math:`|Z|`) equal to the approximation obtained with :math:`{\rm SD}_{\rm est} \approx \sqrt{\chi^2_{\rm ps} \times 5000 / N_\omega}` where :math:`\chi^2_{\rm ps}` is the pseudo chi-squared value of the fit and :math:`N_\omega` is the number of excitation frequencies. This approximation assumes that the error is spread evenly across the real and imaginary parts of the immittance spectrum.
+
+ Returns
+ -------
+ |DataFrame|
+ """
+ from pandas import DataFrame
+
+ if not _is_integer(extended_statistics):
+ raise TypeError(f"Expected an integer instead of {extended_statistics=}")
+ elif not (0 <= extended_statistics <= 3):
+ raise ValueError(
+ f"Expected an integer in the range [0, 3] instead of {extended_statistics=}"
+ )
+
+ statistics: Dict[str, Union[int, float, str]] = {
+ "Log pseudo chi-squared": log(self.pseudo_chisqr),
+ "Number of RC elements": self.num_RC,
+ }
+
+ statistics["Log Fext (extension factor for time constant range)"] = (
+ self.log_F_ext
+ )
+
+ R: float = (
+ self.parallel_resistance if self.admittance else self.series_resistance
+ )
+ C: float = (
+ self.parallel_capacitance if self.admittance else self.series_capacitance
+ )
+ L: float = (
+ self.parallel_inductance if self.admittance else self.series_inductance
+ )
+
+ connection_type: str = "Parallel" if self.admittance else "Series"
+
+ if not isnan(R):
+ statistics[f"{connection_type} resistance (ohm)"] = R
+
+ if not isnan(C):
+ statistics[f"{connection_type} capacitance (F)"] = C
+
+ if not isnan(L):
+ statistics[f"{connection_type} inductance (H)"] = L
+
+ if extended_statistics > 0:
+ statistics.update(
+ self._calculate_residuals_statistics(level=extended_statistics)
+ )
+
+ return DataFrame.from_dict(
+ {
+ "Label": list(statistics.keys()),
+ "Value": list(statistics.values()),
+ }
+ )
+
+ def get_series_resistance(self) -> float:
+ """
+ Get the value of the series resistance.
+
+ Returns
+ -------
+ float
+ """
+ if not self.admittance:
+ series: Series = self.circuit.get_connections(recursive=False)[0]
+ if not isinstance(series, Series):
+ raise TypeError(f"Expected a Series instead of {series=}")
+
+ for element in series.get_elements(recursive=False):
+ if isinstance(element, Resistor):
+ return element.get_value("R")
+
+ return nan
+
+ def get_series_capacitance(self) -> float:
+ """
+ Get the value of the series capacitance (or numpy.nan if not included in the circuit).
+
+ Returns
+ -------
+ float
+ """
+ if not self.admittance:
+ series: Series = self.circuit.get_connections(recursive=False)[0]
+ if not isinstance(series, Series):
+ raise TypeError(f"Expected a Series instead of {series=}")
+
+ for element in series.get_elements(recursive=False):
+ if isinstance(element, Capacitor):
+ return element.get_value("C")
+
+ return nan
+
+ def get_series_inductance(self) -> float:
+ """
+ Get the value of the series inductance (or numpy.nan if not included in the circuit).
+
+ Returns
+ -------
+ float
+ """
+ if not self.admittance:
+ series: Series = self.circuit.get_connections(recursive=False)[0]
+ if not isinstance(series, Series):
+ raise TypeError(f"Expected a Series instead of {series=}")
+
+ for element in series.get_elements(recursive=False):
+ if isinstance(element, Inductor):
+ return element.get_value("L")
+
+ return nan
+
+ def get_parallel_resistance(self) -> float:
+ """
+ Get the value of the parallel resistance.
+
+ Returns
+ -------
+ float
+ """
+ if self.admittance:
+ parallel: Parallel = self.circuit.get_connections(recursive=True)[1]
+ if not isinstance(parallel, Parallel):
+ raise TypeError(f"Expected a Parallel instead of {parallel=}")
+
+ for element in parallel.get_elements(recursive=False):
+ if isinstance(element, Resistor):
+ return element.get_value("R")
+
+ return nan
+
+ def get_parallel_capacitance(self) -> float:
+ """
+ Get the value of the parallel capacitance (or numpy.nan if not included in the circuit).
+
+ Returns
+ -------
+ float
+ """
+ if self.admittance:
+ parallel: Parallel = self.circuit.get_connections(recursive=True)[1]
+ if not isinstance(parallel, Parallel):
+ raise TypeError(f"Expected a Parallel instead of {parallel=}")
+
+ for element in parallel.get_elements(recursive=False):
+ if isinstance(element, Capacitor):
+ return element.get_value("C")
+
+ return nan
+
+ def get_parallel_inductance(self) -> float:
+ """
+ Get the value of the parallel inductance (or numpy.nan if not included in the circuit).
+
+ Returns
+ -------
+ float
+ """
+ if self.admittance:
+ parallel: Parallel = self.circuit.get_connections(recursive=True)[1]
+ if not isinstance(parallel, Parallel):
+ raise TypeError(f"Expected a Parallel instead of {parallel=}")
+
+ for element in parallel.get_elements(recursive=False):
+ if isinstance(element, Inductor):
+ return element.get_value("L")
+
+ return nan
+
+    def get_estimated_percent_noise(self) -> float:
+        r"""
+        Estimate the amount of noise (as a percentage of :math:`|Z|`) using the approximation :math:`{\rm SD}_{\rm est} \approx \sqrt{\chi^2_{\rm ps} \times 5000 / N_\omega}` where :math:`\chi^2_{\rm ps}` is the pseudo chi-squared value of the fit and :math:`N_\omega` is the number of excitation frequencies. This approximation assumes that the error is spread evenly across the real and imaginary parts of the immittance spectrum.
+
+        References:
+
+        - V. Yrjänä and J. Bobacka, 2024, Electrochim. Acta, 504, 144951 (https://doi.org/10.1016/j.electacta.2024.144951)
+
+        Returns
+        -------
+        float
+        """
+        # Delegates to the shared helper from .utility so that the estimate is
+        # computed consistently everywhere in this module.
+        return _estimate_pct_noise(self.impedances, self.pseudo_chisqr)
diff --git a/src/pyimpspec/analysis/kramers_kronig/single.py b/src/pyimpspec/analysis/kramers_kronig/single.py
new file mode 100644
index 0000000..e8473a9
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/single.py
@@ -0,0 +1,206 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from pyimpspec.typing.helpers import (
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ Union,
+ _is_boolean,
+ _is_integer,
+)
+from pyimpspec.data import DataSet
+from .result import KramersKronigResult
+from .algorithms import (
+ suggest_num_RC,
+ suggest_representation,
+)
+from .exploratory import evaluate_log_F_ext
+
+
+def perform_kramers_kronig_test(
+    data: DataSet,
+    test: str = "real",
+    num_RC: int = 0,
+    add_capacitance: bool = True,
+    add_inductance: bool = True,
+    admittance: Optional[bool] = None,
+    min_log_F_ext: float = -1.0,
+    max_log_F_ext: float = 1.0,
+    log_F_ext: float = 0.0,
+    num_F_ext_evaluations: int = 20,
+    rapid_F_ext_evaluations: bool = True,
+    cnls_method: str = "leastsq",
+    max_nfev: int = 0,
+    timeout: int = 60,
+    num_procs: int = -1,
+    **kwargs,
+) -> KramersKronigResult:
+    """
+    Performs linear Kramers-Kronig tests, attempts to automatically find a suitable extension of the time constant range, optionally suggests the appropriate immittance representation to test, and automatically suggests the optimal number of RC elements to use.
+    The results can be used to check the validity of an impedance spectrum before performing equivalent circuit fitting.
+    This function acts as a wrapper for |evaluate_log_F_ext|, |suggest_num_RC_limits|, |suggest_num_RC|, and |suggest_representation|.
+
+    References:
+
+    - B.A. Boukamp, 1995, J. Electrochem. Soc., 142, 1885-1894 (https://doi.org/10.1149/1.2044210)
+    - V. Yrjänä and J. Bobacka, 2024, Electrochim. Acta, 504, 144951 (https://doi.org/10.1016/j.electacta.2024.144951)
+
+    Parameters
+    ----------
+    data: DataSet
+        The data set to be tested.
+
+    test: str, optional
+        Supported values include "complex", "imaginary", "real", "complex-inv", "imaginary-inv", "real-inv", and "cnls".
+        The first three correspond to the complex, imaginary, real tests, respectively, described by Boukamp (1995).
+        These three implementations use least squares fitting (see `numpy.linalg.lstsq <https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html>`_).
+        The implementations ending with "-inv" use matrix inversion, which was the default in pyimpspec prior to version 5.0.0.
+        The "cnls" implementation uses complex non-linear least squares fitting.
+
+    num_RC: int, optional
+        The number of RC elements to use.
+        A value greater than or equal to one results in the specific number of RC elements being tested.
+        Otherwise, the number of RC elements is determined automatically.
+
+    add_capacitance: bool, optional
+        Add an additional capacitance in series (or in parallel if ``admittance=True``) with the rest of the circuit.
+
+    add_inductance: bool, optional
+        Add an additional inductance in series (or in parallel if ``admittance=True``) with the rest of the circuit.
+
+    admittance: Optional[bool], optional
+        If True, then perform the test(s) using the admittance data (:math:`Y = \\frac{1}{Z}`) instead of the impedance data (:math:`Z`).
+        Each representation uses a different equivalent circuit model: Fig. 1 for impedance and Fig. 13 for admittance (Boukamp, 1995).
+        Operating on the admittance data may be necessary in some cases such as when there is a negative differential resistance.
+        If set to None, then both representations are used and the |suggest_representation| is used to pick one.
+
+    min_log_F_ext: float, optional
+        The lower limit for |log F_ext|, which extends or contracts the range of time constants.
+
+    max_log_F_ext: float, optional
+        The upper limit for |log F_ext|, which extends or contracts the range of time constants.
+
+    log_F_ext: float, optional
+        If ``num_F_ext_evaluations == 0``, then ``log_F_ext`` is used directly as the value for |log F_ext|.
+
+    num_F_ext_evaluations: int, optional
+        The maximum number of evaluations to perform when trying to automatically estimate the optimal |log F_ext|.
+        Values greater than zero cause an approach based on splitting the range of logarithmic extensions into evenly spaced parts, estimating where the minimum is, and evaluating additional points near that minimum.
+        Values less than zero cause an approach based on using the differential evolution algorithm to find the minimum.
+        A value of zero causes ``log_F_ext`` to be used directly as |log F_ext|.
+
+    rapid_F_ext_evaluations: bool, optional
+        If possible, minimize the number of time constants that are tested when evaluating extensions in order to perform the optimization faster.
+
+    cnls_method: str, optional
+        The iterative method used to perform the fitting.
+        Only relevant when performing "cnls" tests.
+
+    max_nfev: int, optional
+        The maximum number of function evaluations.
+        If less than one, then no limit is imposed.
+        Only relevant when performing "cnls" tests.
+
+    timeout: int, optional
+        The maximum amount of time in seconds to spend performing tests.
+        Only relevant when performing "cnls" tests.
+
+    num_procs: int, optional
+        The number of parallel processes to use when performing tests.
+        Only relevant when performing "cnls" tests.
+
+    **kwargs
+        Additional keyword arguments are passed on to the algorithms that are used when automatically determining an optimal number of RC elements.
+
+    Returns
+    -------
+    KramersKronigResult
+        A single linear Kramers-Kronig test result representing both the optimal extension of the range of time constants and the optimal number of RC elements (i.e., time constants).
+    """
+    if not _is_integer(num_RC):
+        raise TypeError(f"Expected an integer instead of {num_RC=}")
+
+    if not (_is_boolean(admittance) or admittance is None):
+        raise TypeError(f"Expected a boolean or None instead of {admittance=}")
+
+    # admittance=None means "evaluate both representations and pick afterwards".
+    options: List[bool] = [False, True] if admittance is None else [admittance]
+    results: List[Union[KramersKronigResult, Tuple[KramersKronigResult, Dict[int, float], int, int]]] = []
+    err: Optional[Exception] = None
+
+    # NOTE: the loop variable deliberately shadows the 'admittance' parameter.
+    for admittance in options:
+        try:
+            log_F_ext_evaluations: List[Tuple[float, List[KramersKronigResult], float]]
+            log_F_ext_evaluations = evaluate_log_F_ext(
+                data=data,
+                test=test,
+                num_RCs=[num_RC] if num_RC > 0 else None,
+                add_capacitance=add_capacitance,
+                add_inductance=add_inductance,
+                admittance=admittance,
+                min_log_F_ext=min_log_F_ext,
+                max_log_F_ext=max_log_F_ext,
+                log_F_ext=log_F_ext,
+                num_F_ext_evaluations=num_F_ext_evaluations,
+                rapid_F_ext_evaluations=rapid_F_ext_evaluations,
+                cnls_method=cnls_method,
+                max_nfev=max_nfev,
+                timeout=timeout,
+                num_procs=num_procs,
+            )
+            # The first item corresponds to the best log F_ext that was found.
+            tests: List[KramersKronigResult] = log_F_ext_evaluations[0][1]
+        except ValueError as e:
+            # Remember the error; it is re-raised only if neither
+            # representation produces a result.
+            err = e
+            continue
+
+        if num_RC > 0:
+            if not (tests[0].num_RC <= num_RC <= tests[-1].num_RC):
+                raise ValueError(
+                    f"Expected the specified number of RC elements to be {tests[0].num_RC} <= {num_RC=} <= {tests[-1].num_RC}"
+                )
+
+            results.append(tests[0])
+        else:
+            results.append(suggest_num_RC(tests, **kwargs))
+
+    if len(results) == 0:
+        if err is not None:
+            raise err
+        else:
+            raise ValueError(f"Expected to have at least one item in {results=}")
+
+    if num_RC > 0:
+        # Fixed num_RC: each item is a bare KramersKronigResult.
+        if not all(map(lambda t: isinstance(t, KramersKronigResult), results)):
+            raise TypeError(f"Expected only KramersKronigResult instances instead of {results=}")
+
+        if len(results) == 1:
+            return results[0]
+        else:
+            # Pick the representation with the better (lower) pseudo chi-squared.
+            return min(results, key=lambda t: t.pseudo_chisqr)
+
+    # Automatic num_RC: each item is the 4-tuple returned by suggest_num_RC.
+    if not all(map(lambda t: isinstance(t, tuple), results)):
+        raise TypeError(f"Expected only tuples instead of {results=}")
+    elif not all(map(lambda t: len(t) == 4, results)):
+        raise ValueError(f"Expected tuples with four items instead of {results=}")
+
+    if len(results) == 1:
+        return results[0][0]
+    else:
+        return suggest_representation(results)[0]
diff --git a/src/pyimpspec/analysis/kramers_kronig/utility.py b/src/pyimpspec/analysis/kramers_kronig/utility.py
new file mode 100644
index 0000000..fb955ea
--- /dev/null
+++ b/src/pyimpspec/analysis/kramers_kronig/utility.py
@@ -0,0 +1,130 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+from numpy import (
+ array,
+ complex128,
+ float64,
+ inf,
+ int64,
+ log10 as log,
+ max,
+ min,
+ sqrt,
+)
+from numpy.typing import NDArray
+from pyimpspec.circuit import parse_cdc
+from pyimpspec.circuit.base import Element
+from pyimpspec.circuit.circuit import (
+ Circuit,
+ Series,
+ Parallel,
+)
+from pyimpspec.circuit.resistor import Resistor
+from pyimpspec.circuit.capacitor import Capacitor
+from pyimpspec.circuit.inductor import Inductor
+from pyimpspec.circuit.kramers_kronig import (
+ KramersKronigAdmittanceRC,
+ KramersKronigRC,
+)
+from pyimpspec.typing import ComplexImpedances
+from pyimpspec.typing.helpers import List
+
+
+def _generate_time_constants(
+ w: NDArray[float64],
+ num_RC: int,
+ log_F_ext: float,
+) -> NDArray[float64]:
+ # Calculate time constants according to eq. 12 from
+ # Schönleber et al. (2014) and eq. 18 in Boukamp (1995).
+ if num_RC < 2:
+ raise ValueError(
+ f"Expected an integer greater than or equal to 2 instead of {num_RC=}"
+ )
+
+ F_ext: float = 10**log_F_ext
+ tau_min: float64 = 1 / (max(w) * F_ext)
+ tau_max: float64 = F_ext / min(w)
+ k: NDArray[int64] = array(list(range(1, num_RC + 1)))
+
+ return 10 ** (log(tau_min) + (k - 1) / (num_RC - 1) * log(tau_max / tau_min))
+
+
+def _generate_circuit(
+ taus: NDArray[float64],
+ add_capacitance: bool,
+ add_inductance: bool,
+ admittance: bool,
+) -> Circuit:
+ elements: List[Element] = []
+ elements.append(Resistor(R=1).set_lower_limits(R=-inf).set_upper_limits(R=inf))
+
+ t: float
+ for t in taus:
+ if admittance:
+ elements.append(KramersKronigAdmittanceRC(tau=t))
+ else:
+ elements.append(KramersKronigRC(tau=t))
+
+ if add_capacitance:
+ elements.append(Capacitor(C=1e-6).set_lower_limits(C=-inf).set_upper_limits(C=inf))
+
+ if add_inductance:
+ elements.append(Inductor(L=1e-3).set_lower_limits(L=-inf).set_upper_limits(L=inf))
+
+ if admittance:
+ return Circuit(Parallel(elements))
+
+ return Circuit(Series(elements))
+
+
+def _boukamp_weight(
+ Z: ComplexImpedances,
+ admittance: bool,
+) -> NDArray[float64]:
+ # See eq. 13 in Boukamp (1995)
+ if admittance:
+ Y: NDArray[complex128] = 1 / Z
+ return (Y.real**2 + Y.imag**2) ** -1
+
+ return (Z.real**2 + Z.imag**2) ** -1 # type: ignore
+
+
+def _estimate_pseudo_chisqr(Z: ComplexImpedances, pct_noise: float) -> float:
+ # Assumes that two uncorrelated and normally distributed noises with the
+ # same standard deviation were added to the real and imaginary parts of
+ # the impedance spectrum.
+ return len(Z) * pct_noise**2 / 5000
+
+
+def _estimate_pct_noise(Z: ComplexImpedances, pseudo_chisqr: float) -> float:
+ # See _estimate_pseudo_chisqr
+ return sqrt(5000 * pseudo_chisqr / len(Z))
+
+
+def _format_log_F_ext_for_latex(log_F_ext: float) -> str:
+ value: str = f"{log_F_ext:.3g}"
+ if "e" in value:
+ coefficient: str
+ exponent: str
+ coefficient, exponent = value.split("e")
+ value = coefficient + r" \times 10^{" + str(int(exponent)) + "}"
+
+ return value
diff --git a/src/pyimpspec/analysis/utility.py b/src/pyimpspec/analysis/utility.py
index af4e297..78cfae4 100644
--- a/src/pyimpspec/analysis/utility.py
+++ b/src/pyimpspec/analysis/utility.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -30,44 +30,56 @@
__config__ as numpy_config,
array,
ceil,
- empty,
float64,
floor,
int64,
integer,
isinf,
- issubdtype,
log10 as log,
logspace,
- ndarray,
sum as array_sum,
)
from numpy.typing import NDArray
from pyimpspec.typing import (
ComplexImpedances,
- ComplexResidual,
ComplexResiduals,
Frequencies,
Frequency,
)
+from pyimpspec.typing.helpers import (
+ _cast_to_floating_array,
+ _is_complex_array,
+ _is_floating_array,
+ _is_integer,
+)
def _interpolate(
experimental: Frequencies,
- num_per_decade: int,
+ num_per_decade: integer,
) -> Frequencies:
- if not isinstance(experimental, ndarray):
- experimental = array(experimental, dtype=Frequency)
- assert (
- issubdtype(type(num_per_decade), integer) and num_per_decade > 0
- ), num_per_decade
+ if not _is_floating_array(experimental):
+ experimental = _cast_to_floating_array(experimental)
+
+ if not _is_integer(num_per_decade):
+ raise TypeError(f"Expected an integer instead of {num_per_decade=}")
+ elif num_per_decade <= 0:
+ raise ValueError(
+ f"Expected an integer greater than zero instead of {num_per_decade=}"
+ )
+
min_f: float64 = min(experimental)
max_f: float64 = max(experimental)
- assert 0.0 < min_f < max_f
- assert not isinf(max_f)
+ if not (0.0 < min_f < max_f):
+ raise ValueError(
+ f"Expected 0.0 < min_f < max_f instead of {min_f=} and {max_f=}"
+ )
+ elif isinf(max_f):
+ raise ValueError(f"Expected max_f < inf instead of {max_f=}")
+
log_min_f: int64 = int64(floor(log(min_f)))
log_max_f: int64 = int64(ceil(log(max_f)))
- f: float64
+
freq: List[float64] = [
f
for f in logspace(
@@ -75,10 +87,13 @@ def _interpolate(
)
if f >= min_f and f <= max_f
]
+
if min_f not in freq:
freq.append(min_f)
+
if max_f not in freq:
freq.append(max_f)
+
return array(list(sorted(freq, reverse=True)), dtype=Frequency)
@@ -86,15 +101,17 @@ def _calculate_residuals(
Z_exp: ComplexImpedances,
Z_fit: ComplexImpedances,
) -> ComplexResiduals:
- residuals: ComplexResiduals = empty(Z_exp.shape, dtype=ComplexResidual)
- residuals.real = (Z_exp.real - Z_fit.real) / abs(Z_exp)
- residuals.imag = (Z_exp.imag - Z_fit.imag) / abs(Z_exp)
- return residuals
+ # Eqs. 15 and 16 from Schönleber et al., 2014.
+ # DOI:10.1016/j.electacta.2014.01.034
+ return (Z_exp - Z_fit) / abs(Z_exp)
def _boukamp_weight(Z_exp: ComplexImpedances) -> NDArray[float64]:
- assert isinstance(Z_exp, ndarray), Z_exp
- # See eq. 13 in Boukamp (1995)
+ if not _is_complex_array(Z_exp):
+ raise TypeError(f"Expected an array of complex values instead of {Z_exp=}")
+
+ # Eq. 13 in Boukamp, 1995.
+ # DOI:10.1149/1.2044210
return (Z_exp.real**2 + Z_exp.imag**2) ** -1 # type: ignore
@@ -103,12 +120,20 @@ def _calculate_pseudo_chisqr(
Z_fit: ComplexImpedances,
weight: Optional[NDArray[float64]] = None,
) -> float:
- assert isinstance(Z_exp, ndarray), Z_exp
- assert isinstance(Z_fit, ndarray), Z_fit
- assert isinstance(weight, ndarray) or weight is None, weight
+ if not _is_complex_array(Z_exp):
+ raise TypeError(f"Expected an array of complex values instead of {Z_exp=}")
+
+ if not _is_complex_array(Z_fit):
+ raise TypeError(f"Expected an array of complex values instead of {Z_fit=}")
+
+ if not (_is_floating_array(weight) or weight is None):
+ raise TypeError(f"Expected None or an array of floats instead of {weight=}")
+
if weight is None:
weight = _boukamp_weight(Z_exp)
- # See eq. 14 in Boukamp (1995)
+
+ # Eq. 14 in Boukamp, 1995.
+ # DOI:10.1149/1.2044210
return float(
array_sum(
weight * ((Z_exp.real - Z_fit.real) ** 2 + (Z_exp.imag - Z_fit.imag) ** 2)
@@ -130,7 +155,9 @@ def _set_default_num_procs(num_procs: int):
If the value is greater than zero, then the value is used as the number of processes to use.
Otherwise, any previous override is disabled.
"""
- assert issubdtype(type(num_procs), integer), num_procs
+ if not _is_integer(num_procs):
+ raise TypeError(f"Expected an integer instead of {num_procs=}")
+
global NUM_PROCS_OVERRIDE
NUM_PROCS_OVERRIDE = num_procs
@@ -138,13 +165,13 @@ def _set_default_num_procs(num_procs: int):
def _get_default_num_procs() -> int:
"""
Get the default number of parallel processes that pyimpspec would try to use.
- NumPy may be using libraries that multithreaded, which can lead to poor performance or system responsiveness when combined with pyimpspec's use of multiple processes.
+ NumPy may be using libraries that are multithreaded, which can lead to poor performance or system responsiveness when combined with pyimpspec's use of multiple processes.
This function attempts to return a reasonable number of processes depending on the detected libraries (and relevant environment variables):
- OpenBLAS (``OPENBLAS_NUM_THREADS``)
- MKL (``MKL_NUM_THREADS``)
- If none the libraries listed above are detected because some other library is used, then the value returned by ``multiprocessing.cpu_count()`` is used.
+ If none of the libraries listed above are detected because some other library is used, then the value returned by ``multiprocessing.cpu_count()`` is used.
Returns
-------
@@ -153,6 +180,7 @@ def _get_default_num_procs() -> int:
if NUM_PROCS_OVERRIDE > 0:
return NUM_PROCS_OVERRIDE
num_cores: int = cpu_count()
+
multithreaded: Dict[str, str] = {
"openblas": "OPENBLAS_NUM_THREADS",
"mkl": "MKL_NUM_THREADS",
diff --git a/src/pyimpspec/analysis/zhit/__init__.py b/src/pyimpspec/analysis/zhit/__init__.py
index 627485e..6a5edb3 100644
--- a/src/pyimpspec/analysis/zhit/__init__.py
+++ b/src/pyimpspec/analysis/zhit/__init__.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -28,12 +28,10 @@
)
from numpy import (
angle,
+ complex128,
float64,
- integer,
- issubdtype,
log as ln,
log10 as log,
- ndarray,
pi,
)
from numpy.typing import NDArray
@@ -45,6 +43,12 @@
Phases,
Residuals,
)
+from pyimpspec.typing.helpers import (
+ _is_boolean,
+ _is_floating,
+ _is_floating_array,
+ _is_integer,
+)
from pyimpspec.data import DataSet
from pyimpspec.analysis.utility import (
_calculate_residuals,
@@ -202,7 +206,7 @@ def to_statistics_dataframe(self) -> "DataFrame": # noqa: F821
def perform_zhit(
data: DataSet,
- smoothing: str = "lowess",
+ smoothing: str = "modsinc",
interpolation: str = "akima",
window: str = "auto",
num_points: int = 3,
@@ -211,7 +215,8 @@ def perform_zhit(
center: float = 1.5,
width: float = 3.0,
weights: Optional[NDArray[float64]] = None,
- num_procs: int = 0,
+ admittance: bool = False,
+ num_procs: int = -1,
) -> ZHITResult:
r"""
Performs a reconstruction of the modulus data of an impedance spectrum based on the phase data of that impedance spectrum using the Z-HIT algorithm described by Ehm et al. (2000).
@@ -233,8 +238,7 @@ def perform_zhit(
The data set for which the modulus of the impedance should be reconstructed.
smoothing: str, optional
- The type of smoothing to apply: "none", "lowess" (`Locally Weighted Scatterplot Smoothing `_), "savgol" (`Savitzky-Golay `_), or "auto".
- Savitzky-Golay may produce better results than LOWESS but it expects equally spaced points.
+ The type of smoothing to apply: "none", "lowess" (`Locally Weighted Scatterplot Smoothing `_), "modsinc" (`modified sinc kernel `_), "savgol" (`Savitzky-Golay `_), "whithend" (`Whittaker-Henderson `_), or "auto".
interpolation: str, optional
The type of interpolation to apply: "akima" (`Akima spline `_), "cubic" (`cubic spline `_), "pchip" (`Piecewise Cubic Hermite Interpolating Polynomial `_), or "auto".
@@ -246,7 +250,7 @@ def perform_zhit(
The number of points to take into account while smoothing any given point.
polynomial_order: int, optional
- The order of the polynomial used when smoothing (Savitzky-Golay only).
+ The order of the polynomial used when smoothing (Savitzky-Golay and Whittaker-Henderson only).
num_iterations: int, optional
The number of iterations to perform while smoothing (LOWESS only).
@@ -260,6 +264,9 @@ def perform_zhit(
weights: Optional[NDArray[float64]], optional
If the desired weights can not be implemented using the ``window``, ``center``, and ``width`` parameters, then this parameter can be used to provide custom weights.
+ admittance: bool, optional
+ Use the admittance representation of the data instead of the impedance representation.
+
num_procs: int, optional
The maximum number of parallel processes to use when smoothing algorithm, interpolation spline, and/or window function are set to "auto".
A value less than 1 results in an attempt to figure out a suitable value based on, e.g., the number of cores detected.
@@ -269,50 +276,80 @@ def perform_zhit(
-------
ZHITResult
"""
- assert hasattr(data, "get_frequencies") and callable(data.get_frequencies)
- assert hasattr(data, "get_impedances") and callable(data.get_impedances)
- assert isinstance(smoothing, str)
- assert isinstance(interpolation, str)
- assert isinstance(num_points, int)
- assert isinstance(polynomial_order, int)
- assert isinstance(num_iterations, int)
- assert isinstance(window, str)
- assert isinstance(center, float)
- assert isinstance(width, float)
- assert weights is None or isinstance(weights, ndarray)
- assert issubdtype(type(num_procs), integer), (
- type(num_procs),
- num_procs,
- )
+ if not isinstance(smoothing, str):
+ raise TypeError(f"Expected a string instead of {smoothing=}")
+
+ if not isinstance(interpolation, str):
+ raise TypeError(f"Expected a string instead of {interpolation=}")
+
+ if not _is_integer(num_points):
+ raise TypeError(f"Expected an integer instead of {num_points=}")
+ elif num_points < 1:
+ raise ValueError(f"Expected {num_points=} > 0")
+
+ if not _is_integer(polynomial_order):
+ raise TypeError(f"Expected an integer instead of {polynomial_order=}")
+
+ if not _is_integer(num_iterations):
+ raise TypeError(f"Expected an integer instead of {num_iterations=}")
+ elif num_iterations < 1:
+ raise ValueError(f"Expected {num_iterations=} > 0")
+
+ if not isinstance(window, str):
+ raise TypeError(f"Expected a string instead of {window=}")
+
+ if not _is_floating(center):
+ raise TypeError(f"Expected a float instead of {center=}")
+
+ if not _is_floating(width):
+ raise TypeError(f"Expected a float instead of {width=}")
+ elif width <= 0.0:
+ raise ValueError(f"Expected {width=} > 0.0")
+
+ if not (weights is None or _is_floating_array(weights)):
+ raise TypeError(f"Expected an array of floats or None instead of {weights=}")
+
+ if not _is_boolean(admittance):
+ raise TypeError(f"Expected a boolean instead of {admittance=}")
+
+ if not _is_integer(num_procs):
+ raise TypeError(f"Expected an integer instead of {num_procs=}")
+
if num_procs < 1:
- num_procs = _get_default_num_procs() - abs(num_procs)
- if num_procs < 1:
- num_procs = 1
- if num_points < 1:
- raise ZHITError("The number of points must be greater than 0!")
- if smoothing == "auto" or smoothing == "savgol":
+ num_procs = max((_get_default_num_procs() - abs(num_procs), 1))
+
+ if smoothing in ("auto", "savgol", "whithend"):
if num_points < 2:
- raise ZHITError("The number of points must be greater than 1!")
+ raise ValueError(f"Expected {num_points=} > 1")
if not (0 < polynomial_order < num_points):
- raise ZHITError(
- "The polynomial order must be a value between 0 and the "
- f"number of points (i.e., {num_points} in this case)!"
- )
- if num_iterations < 1:
- raise ZHITError("The number of iterations must be greater than 0!")
- if width <= 0.0:
- raise ZHITError("The window width must be greater than 0.0!")
+ raise ValueError(f"Expected 0 < {polynomial_order=} < {num_points=}")
+
if len(_WINDOW_FUNCTIONS) == 0:
_initialize_window_functions()
+
f: Frequencies = data.get_frequencies()
+ if not (len(f) > 0):
+ raise ValueError(
+ f"There are no unmasked data points in the '{data.get_label()}' data set parsed from '{data.get_path()}'"
+ )
+
log_f: NDArray[float64] = log(f)
ln_omega: NDArray[float64] = ln(2 * pi * f)
- Z_exp: ComplexImpedances = data.get_impedances()
- ln_modulus_exp: NDArray[float64] = ln(abs(Z_exp))
- phase_exp: Phases = angle(Z_exp)
- num_smoothing: int = 3 if smoothing == "auto" else 1
+
+ X_exp: NDArray[complex128] = data.get_impedances() ** (-1 if admittance else 1)
+ offset: float = 0.0
+ # TODO: Apply also when admittance==False?
+ if admittance and min(X_exp.real) < 0.0:
+ offset = abs(min(X_exp.real))
+ X_exp += offset
+
+ ln_modulus_exp: NDArray[float64] = ln(abs(X_exp))
+ phase_exp: Phases = angle(X_exp)
+
+ num_smoothing: int = 5 if smoothing == "auto" else 1
num_interpolation: int = 3 if interpolation == "auto" else 1
num_window: int = len(_WINDOW_FUNCTIONS) if window == "auto" else 1
+
num_steps: int = 0
# Generate weights
num_steps += num_window
@@ -324,6 +361,7 @@ def perform_zhit(
num_steps += num_smoothing * num_interpolation
# Offset adjustment
num_steps += num_window * (num_smoothing * num_interpolation)
+
prog: Progress
with Progress("Performing Z-HIT", total=num_steps + 1) as prog:
window_options: Dict[str, NDArray[float64]] = _generate_window_options(
@@ -334,6 +372,7 @@ def perform_zhit(
width,
prog,
)
+
smoothing_options: Dict[str, Phases] = _generate_smoothing_options(
smoothing,
num_points,
@@ -343,6 +382,7 @@ def perform_zhit(
phase_exp,
prog,
)
+
interpolation_options: Dict[str, Dict[str, Callable]]
simulated_phase: Dict[str, Dict[str, Phases]]
interpolation_options, simulated_phase = _generate_interpolation_options(
@@ -351,31 +391,44 @@ def perform_zhit(
smoothing_options,
prog,
)
+
reconstructions: List[Tuple[NDArray[float64], Phases, str, str]]
reconstructions = _reconstruct_modulus_data(
interpolation_options,
simulated_phase,
ln_omega,
+ admittance,
num_procs,
prog,
)
- results: List[Tuple[float, NDArray[float64], str, str, str]]
+
+ results: List[Tuple[float, NDArray[complex128], str, str, str]]
results = _adjust_modulus_offset(
reconstructions,
window_options,
ln_modulus_exp,
- Z_exp,
+ X_exp,
+ admittance,
num_procs,
prog,
)
- Xps: float
- Z_fit: ComplexImpedances
- Xps, Z_fit, smoothing, interpolation, window = results[0]
+
+ pseudo_chisqr: float
+ X_fit: NDArray[complex128]
+ pseudo_chisqr, X_fit, smoothing, interpolation, window = results[0]
+ X_fit -= offset
+
+ Z_fit: ComplexImpedances = X_fit ** (-1 if admittance else 1)
+ residuals: ComplexResiduals = _calculate_residuals(
+ Z_exp=data.get_impedances(),
+ Z_fit=Z_fit,
+ )
+
return ZHITResult(
frequencies=f,
impedances=Z_fit,
- residuals=_calculate_residuals(Z_exp=Z_exp, Z_fit=Z_fit),
- pseudo_chisqr=Xps,
+ residuals=residuals,
+ pseudo_chisqr=pseudo_chisqr,
smoothing=smoothing,
interpolation=interpolation,
window=window,
diff --git a/src/pyimpspec/analysis/zhit/interpolation.py b/src/pyimpspec/analysis/zhit/interpolation.py
index 0bb46a0..7eb3f9f 100644
--- a/src/pyimpspec/analysis/zhit/interpolation.py
+++ b/src/pyimpspec/analysis/zhit/interpolation.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -46,12 +46,14 @@ def _interpolate_phase(
ln_omega = flip(ln_omega)
phase = flip(phase)
+
if interpolation == "akima":
return Akima1DInterpolator(ln_omega, phase)
elif interpolation == "cubic":
return CubicSpline(ln_omega, phase)
elif interpolation == "pchip":
return PchipInterpolator(ln_omega, phase)
+
raise ZHITError(f"Unsupported interpolation: '{interpolation}'!")
@@ -62,8 +64,10 @@ def _generate_interpolation_options(
prog: Progress,
) -> Tuple[Dict[str, Dict[str, Callable]], Dict[str, Dict[str, Phases]]]:
prog.set_message("Interpolating phase data")
+
interpolation_options: Dict[str, Dict[str, Callable]] = {}
simulated_phase: Dict[str, Dict[str, Phases]] = {}
+
phase: Phases
interpolator: Callable
for interpolation in (
@@ -71,6 +75,7 @@ def _generate_interpolation_options(
):
interpolation_options[interpolation] = {}
simulated_phase[interpolation] = {}
+
for smoothing, phase in smoothing_options.items():
interpolator = _interpolate_phase(
interpolation,
@@ -78,8 +83,11 @@ def _generate_interpolation_options(
phase,
)
interpolation_options[interpolation][smoothing] = interpolator
+
simulated_phase[interpolation][smoothing] = array(
list(map(interpolator, ln_omega))
)
+
prog.increment()
+
return (interpolation_options, simulated_phase)
diff --git a/src/pyimpspec/analysis/zhit/offset.py b/src/pyimpspec/analysis/zhit/offset.py
index d373070..8098708 100644
--- a/src/pyimpspec/analysis/zhit/offset.py
+++ b/src/pyimpspec/analysis/zhit/offset.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -25,6 +25,7 @@
Tuple,
)
from numpy import (
+ complex128,
exp,
float64,
vectorize,
@@ -50,6 +51,7 @@ def _offset_residual(
) -> NDArray[float64]:
offset: float = parameters.valuesdict()["offset"]
errors: NDArray[float64] = ((reconstruction + offset) - ln_modulus) ** 2
+
return weights * errors
@@ -75,8 +77,10 @@ def _calculate_modulus_offset(
)
if where(weights < 0.0)[0].size > 0:
raise ZHITError("Weights must be non-negative values!")
+
parameters: Parameters = Parameters()
parameters.add("offset", 0.0)
+
fit: MinimizerResult = minimize(
_offset_residual,
parameters,
@@ -86,26 +90,32 @@ def _calculate_modulus_offset(
weights,
),
)
+
return fit.params.valuesdict()["offset"]
-def _adjust_offset(args) -> Tuple[float, ComplexImpedances, str, str, str]:
+def _adjust_offset(args) -> Tuple[float, NDArray[complex128], str, str, str]:
(
ln_modulus,
phase,
ln_modulus_exp,
weights,
- Z_exp,
+ X_exp,
+ admittance,
smoothing,
interpolation,
window,
) = args
+
offset: float = _calculate_modulus_offset(ln_modulus, ln_modulus_exp, weights)
- Z_fit: ComplexImpedances = rect(exp(ln_modulus + offset), phase)
- Xps = _calculate_pseudo_chisqr(Z_exp=Z_exp, Z_fit=Z_fit)
+ X_fit: NDArray[complex128] = rect(exp(ln_modulus + offset), phase)
+
return (
- Xps,
- Z_fit,
+ _calculate_pseudo_chisqr(
+ Z_exp=X_exp ** (-1 if admittance else 1),
+ Z_fit=X_fit ** (-1 if admittance else 1),
+ ),
+ X_fit,
smoothing,
interpolation,
window,
@@ -116,12 +126,15 @@ def _adjust_modulus_offset(
reconstructions: List[Tuple[NDArray[float64], Phases, str, str]],
window_options: Dict[str, NDArray[float64]],
ln_modulus_exp: NDArray[float64],
- Z_exp: ComplexImpedances,
+ X_exp: NDArray[complex128],
+ admittance: bool,
num_procs: int,
prog: Progress,
-) -> List[Tuple[float, NDArray[float64], str, str, str]]:
+) -> List[Tuple[float, NDArray[complex128], str, str, str]]:
prog.set_message("Adjusting modulus offset")
- results: List[Tuple[float, NDArray[float64], str, str, str]] = []
+
+ results: List[Tuple[float, ComplexImpedances, str, str, str]] = []
+
args = []
window: str
weights: NDArray[float64]
@@ -130,27 +143,30 @@ def _adjust_modulus_offset(
phase: Phases
smoothing: str
interpolation: str
- for (ln_modulus, phase, smoothing, interpolation) in reconstructions:
+ for ln_modulus, phase, smoothing, interpolation in reconstructions:
args.append(
(
ln_modulus,
phase,
ln_modulus_exp,
weights,
- Z_exp,
+ X_exp,
+ admittance,
smoothing,
interpolation,
window,
)
)
+
if len(args) > 1 and num_procs > 1:
with Pool(num_procs) as pool:
for res in pool.imap_unordered(_adjust_offset, args):
results.append(res)
prog.increment()
+
else:
for res in map(_adjust_offset, args):
results.append(res)
prog.increment()
- results.sort(key=lambda _: _[0])
- return results
+
+ return sorted(results, key=lambda _: _[0])
diff --git a/src/pyimpspec/analysis/zhit/reconstruction.py b/src/pyimpspec/analysis/zhit/reconstruction.py
index 489f745..5a34437 100644
--- a/src/pyimpspec/analysis/zhit/reconstruction.py
+++ b/src/pyimpspec/analysis/zhit/reconstruction.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -51,18 +51,30 @@ def _reconstruct(args) -> Tuple[NDArray[float64], str, str]:
derivator: Any
smoothing: str
interpolation: str
- (ln_omega, interpolator, derivator, smoothing, interpolation) = args
+ admittance: bool
+ (
+ ln_omega,
+ interpolator,
+ derivator,
+ smoothing,
+ interpolation,
+ admittance,
+ ) = args
+
ln_modulus = []
ln_w_s: float = ln_omega[0]
+ gamma = -pi / 6
+
i: int
ln_w_0: float
for i, ln_w_0 in enumerate(ln_omega):
- gamma = -pi / 6
attempts: int = 10
epsabs: float = 1e-9
limit: int = 100
+
with catch_warnings():
filterwarnings("error", category=IntegrationWarning)
+
while True:
try:
integral = quad(
@@ -73,6 +85,7 @@ def _reconstruct(args) -> Tuple[NDArray[float64], str, str]:
limit=limit,
)[0]
break
+
except IntegrationWarning as e:
attempts -= 1
if attempts <= 0:
@@ -85,11 +98,16 @@ def _reconstruct(args) -> Tuple[NDArray[float64], str, str]:
else:
print(e)
break
+
derivative = derivator(ln_w_0)
if isnan(derivative):
derivative = 0
- mod: float = 2 / pi * integral + gamma * derivative
- ln_modulus.append(mod)
+
+ if admittance:
+ ln_modulus.append(-(-2 / pi * integral - gamma * derivative))
+ else:
+ ln_modulus.append(2 / pi * integral + gamma * derivative)
+
return (array(ln_modulus), smoothing, interpolation)
@@ -97,12 +115,15 @@ def _reconstruct_modulus_data(
interpolation_options: Dict[str, Dict[str, Any]],
simulated_phase: Dict[str, Dict[str, Phases]],
ln_omega: NDArray[float64],
+ admittance: bool,
num_procs: int,
prog: Progress,
) -> List[Tuple[NDArray[float64], Phases, str, str]]:
prog.set_message("Reconstructing modulus data")
+
reconstructions: List[Tuple[NDArray[float64], Phases, str, str]] = []
- args: List[Tuple[NDArray[float64], Any, Any, str, str]] = []
+ args: List[Tuple[NDArray[float64], Any, Any, str, str, bool]] = []
+
interpolation: str
for interpolation in interpolation_options:
smoothing: str
@@ -115,12 +136,14 @@ def _reconstruct_modulus_data(
interpolator.derivative(1),
smoothing,
interpolation,
+ admittance,
)
)
+
ln_modulus: NDArray[float64]
if len(args) > 1 and num_procs > 1:
with Pool(num_procs) as pool:
- for (ln_modulus, smoothing, interpolation) in pool.imap_unordered(
+ for ln_modulus, smoothing, interpolation in pool.imap_unordered(
_reconstruct,
args,
):
@@ -133,8 +156,9 @@ def _reconstruct_modulus_data(
)
)
prog.increment()
+
else:
- for (ln_modulus, smoothing, interpolation) in map(_reconstruct, args):
+ for ln_modulus, smoothing, interpolation in map(_reconstruct, args):
reconstructions.append(
(
ln_modulus,
@@ -144,4 +168,5 @@ def _reconstruct_modulus_data(
)
)
prog.increment()
+
return reconstructions
diff --git a/src/pyimpspec/analysis/zhit/smoothing.py b/src/pyimpspec/analysis/zhit/smoothing/__init__.py
similarity index 70%
rename from src/pyimpspec/analysis/zhit/smoothing.py
rename to src/pyimpspec/analysis/zhit/smoothing/__init__.py
index 81c38a4..76a5a2f 100644
--- a/src/pyimpspec/analysis/zhit/smoothing.py
+++ b/src/pyimpspec/analysis/zhit/smoothing/__init__.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -23,6 +23,8 @@
from pyimpspec.exceptions import ZHITError
from pyimpspec.typing import Phases
from pyimpspec.progress import Progress
+from .modified_sinc import _smooth_like_savgol as modsinc
+from .whittaker_henderson import _smooth as whithend
def _smooth_phase(
@@ -35,6 +37,7 @@ def _smooth_phase(
) -> Phases:
if smoothing == "none":
return phase
+
elif smoothing == "savgol":
from scipy.signal import savgol_filter
@@ -43,8 +46,15 @@ def _smooth_phase(
window_length=num_points,
polyorder=polynomial_order,
)
+
elif smoothing == "lowess":
- from statsmodels.nonparametric.smoothers_lowess import lowess
+ try:
+ from statsmodels.nonparametric.smoothers_lowess import lowess
+ except ImportError:
+ raise ImportError(
+ "The optional dependency 'statsmodels' could not be imported! "
+ + "Consider installing the dependency if LOWESS smoothing is required."
+ )
return lowess(
phase,
@@ -53,6 +63,22 @@ def _smooth_phase(
frac=num_points / len(phase),
it=num_iterations,
)
+
+ elif smoothing == "whithend":
+ return whithend(
+ phase,
+ degree=polynomial_order,
+ m=num_points,
+ )
+
+ elif smoothing == "modsinc":
+ return modsinc(
+ phase,
+ degree=polynomial_order,
+ m=num_points,
+ is_MS1=False,
+ )
+
raise ZHITError(f"Unsupported smoothing: '{smoothing}'!")
@@ -66,9 +92,18 @@ def _generate_smoothing_options(
prog: Progress,
) -> Dict[str, Phases]:
prog.set_message("Smoothing phase data")
+
smoothing_options: Dict[str, Phases] = {}
for smoothing in (
- ["none", "lowess", "savgol"] if smoothing == "auto" else [smoothing]
+ [
+ "none",
+ "lowess",
+ "modsinc",
+ "savgol",
+ "whithend",
+ ]
+ if smoothing == "auto"
+ else [smoothing]
):
smoothing_options[smoothing] = _smooth_phase(
smoothing,
@@ -79,4 +114,5 @@ def _generate_smoothing_options(
phase_exp,
)
prog.increment()
+
return smoothing_options
diff --git a/src/pyimpspec/analysis/zhit/smoothing/modified_sinc.py b/src/pyimpspec/analysis/zhit/smoothing/modified_sinc.py
new file mode 100644
index 0000000..a1e5a15
--- /dev/null
+++ b/src/pyimpspec/analysis/zhit/smoothing/modified_sinc.py
@@ -0,0 +1,459 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+# Modified sinc kernel with linear extrapolation that was included in the article:
+# https://doi.org/10.1021/acsmeasuresciau.1c00054
+#
+# Ported from the Java source code included in the correction:
+# https://doi.org/10.1021/acsmeasuresciau.3c00017
+#
+# The original Java source code was written by Michael Schmid
+# and was licensed under GPLv3.
+
+from dataclasses import dataclass
+from typing import (
+ List,
+ Optional,
+ Tuple,
+)
+from numpy import (
+ exp,
+ float64,
+ ceil,
+ cos,
+ isnan,
+ nan,
+ pi,
+ sin,
+ zeros,
+)
+from numpy.typing import NDArray
+
+
+def _smooth_except_boundaries(
+    data: NDArray[float64],
+    kernel: NDArray[float64],
+) -> NDArray[float64]:
+    """
+    Convolve the data with the symmetric kernel (of which only the
+    non-negative half is stored), leaving the first and last
+    (len(kernel) - 1) points untouched (zero) since the full kernel does
+    not fit there.
+    """
+    smoothed: NDArray[float64] = zeros(data.shape, dtype=float64)
+    radius: int = len(kernel) - 1  # how many additional points we need
+
+    i: int
+    for i in range(radius, len(data) - radius):
+        # Center tap plus the mirrored off-center taps.
+        acc: float = kernel[0] * data[i]
+
+        j: int
+        for j in range(1, len(kernel)):
+            acc += kernel[j] * (data[i - j] + data[i + j])
+
+        smoothed[i] = acc
+
+    return smoothed
+
+
+def _bandwidth_to_m(is_MS1: bool, degree: int, bandwidth: float) -> int:
+    """
+    Return the kernel halfwidth m whose -3 dB point (response 1/sqrt(2))
+    comes closest to the requested band width.
+    """
+    if not (0 < bandwidth < 0.5):
+        raise ValueError(
+            f"Calculated bandwidth is out of bounds: 0 < {bandwidth=:.3g} < 0.5"
+        )
+
+    # Empirical halfwidth-vs-bandwidth fits for the MS1 and MS kernels.
+    if is_MS1:
+        radius: float = (0.27037 + 0.24920 * degree) / bandwidth - 1.0
+    else:
+        radius = (0.74548 + 0.24943 * degree) / bandwidth - 1.0
+
+    return int(round(radius))
+
+
+def _make_kernel(
+    is_MS1: bool,
+    degree: int,
+    m: int,
+    coeffs: Optional[NDArray[float64]],
+) -> NDArray[float64]:
+    """
+    Creates an MS or MS1 kernel and returns it.
+
+    Only the non-negative half of the symmetric kernel is returned:
+    index 0 holds the center tap and index m the outermost tap. The
+    kernel is a windowed sinc plus optional passband-flattening
+    correction terms (``coeffs`` from _get_coefficients, or None when the
+    degree requires no correction), normalized so that the full mirrored
+    kernel sums to 1.
+    """
+    kernel: NDArray[float64] = zeros(m + 1, dtype=float64)
+    num_coeffs: int = 0 if coeffs is None else len(coeffs)
+    total: float = 0.0
+
+    i: int
+    for i in range(0, m + 1):
+        # x=0 at center, x=1 at zero
+        x: float = i * (1.0 / (m + 1))
+        sinc_arg: float = pi * 0.5 * (degree + (2 if is_MS1 else 4)) * x
+        # sinc(0) = 1; avoid evaluating 0/0 at the center tap.
+        k: float = 1 if i == 0 else sin(sinc_arg) / sinc_arg
+
+        if coeffs is not None:
+            j: int
+            for j in range(0, num_coeffs):
+                if is_MS1:
+                    # shorter kernel version, needs more correction terms
+                    k += coeffs[j] * x * sin((j + 1) * pi * x)
+                else:
+                    # start at 1 for degree 6, 10; at 2 for degree 8
+                    nu: int = 2 if ((degree // 2) & 0x1) == 0 else 1
+                    k += coeffs[j] * x * sin((2 * j + nu) * pi * x)
+
+        # decay alpha=2: 13.5% at end without correction, 2sqrt2 sigma
+        # Window: Gaussian plus its images mirrored at x = +/-2, with an
+        # offset so the window (approximately) reaches zero at the ends.
+        decay: float = 2 if is_MS1 else 4
+        k *= (
+            exp(-x * x * decay)
+            + exp(-(x - 2) * (x - 2) * decay)
+            + exp(-(x + 2) * (x + 2) * decay)
+            - 2 * exp(-decay)
+            - exp(-9 * decay)
+        )
+
+        kernel[i] = k
+
+        total += k
+        if i > 0:
+            # off-center kernel elements appear twice
+            total += k
+
+    for i in range(0, m + 1):
+        # normalize the kernel to total=1
+        kernel[i] *= 1.0 / total
+
+    return kernel
+
+
+def _get_correction_data(is_MS1: bool, degree: int) -> Optional[List[List[float]]]:
+    """
+    Returns degree-specific correction data or None if not required.
+
+    Each entry is a triple [a, b, c] from which a correction coefficient
+    is computed as a + b / (c - m)**3 (see _get_coefficients). Low
+    degrees need no correction and yield None.
+
+    Raises NotImplementedError for degrees without tabulated data
+    (odd degrees or degrees above 10).
+    """
+    if is_MS1:
+        if degree < 4:
+            return None
+        elif degree == 4:
+            return [
+                [0.021944195, 0.050284006, 0.765625],
+            ]
+        elif degree == 6:
+            return [
+                [0.0018977303, 0.008476806, 1.2625],
+                [0.023064667, 0.13047926, 1.2265625],
+            ]
+        elif degree == 8:
+            return [
+                [0.0065903002, 0.057929456, 1.915625],
+                [0.0023234477, 0.010298849, 2.2726562],
+                [0.021046653, 0.16646601, 1.98125],
+            ]
+        elif degree == 10:
+            return [
+                [9.749618e-4, 0.0020742896, 3.74375],
+                [0.008975366, 0.09902466, 2.7078125],
+                [0.0024195414, 0.010064855, 3.296875],
+                [0.019185117, 0.18953617, 2.784961],
+            ]
+
+    else:
+        if degree < 6:
+            return None
+        elif degree == 6:
+            return [
+                [0.001717576, 0.02437382, 1.64375],
+            ]
+        elif degree == 8:
+            return [
+                [0.0043993373, 0.088211164, 2.359375],
+                [0.006146815, 0.024715371, 3.6359375],
+            ]
+        elif degree == 10:
+            return [
+                [0.0011840032, 0.04219344, 2.746875],
+                [0.0036718843, 0.12780383, 2.7703125],
+            ]
+
+    raise NotImplementedError(f"Unsupported degree: {degree}")
+
+
+def _get_coefficients(is_MS1: bool, degree: int, m: int) -> Optional[NDArray[float64]]:
+    """
+    Return the correction coefficients z for the x*sin((j+1)*pi*x) terms
+    that flatten the passband of a Sinc*Gaussian kernel, or None when the
+    degree needs no correction.
+    """
+    data: Optional[List[List[float]]] = _get_correction_data(is_MS1, degree)
+    if data is None:
+        return None
+
+    out: NDArray[float64] = zeros(len(data), dtype=float64)
+
+    i: int
+    for i in range(len(data)):
+        a, b, c = data[i]
+        out[i] = a + b / (c - m) ** 3
+
+    return out
+
+
+def _make_fit_weights(is_MS1: bool, degree: int, m: int) -> NDArray[float64]:
+    """
+    Returns the weights for the linear fit used for linear extrapolation
+    at the end. The weight function is a Hann (cos^2) function. For beta=1
+    (the beta value for n=4), it decays to zero at the position of the
+    first zero of the sinc function in the kernel. Larger beta values lead
+    to stronger noise suppression near the edges, but the smoothed curve
+    does not follow the input as well as for lower beta (for high degrees,
+    also leading to more ringing near the boundaries).
+    """
+    # Position (in points) of the first zero of the sinc in the kernel.
+    first_zero: float = (m + 1) / ((1.0 if is_MS1 else 1.5) + 0.5 * degree)
+    beta: float = (
+        (0.65 + 0.35 * exp(-0.55 * (degree - 4)))
+        if is_MS1
+        else 0.70 + 0.14 * exp(-0.60 * (degree - 4))
+    )
+    fit_length: int = int(ceil(first_zero * beta))
+    weights: NDArray[float64] = zeros(fit_length, dtype=float64)
+
+    # Hann (cos^2) window over the fit region of length first_zero*beta.
+    p: int
+    for p in range(0, fit_length):
+        weights[p] = cos(0.5 * pi / (first_zero * beta) * p) ** 2
+
+    return weights
+
+
+def _savitzky_golay_bandwidth(degree: int, m: int) -> float:
+    """
+    Return the -3 dB bandwidth of a traditional Savitzky-Golay filter,
+    i.e. the frequency (with sampling frequency defined as 1) where the
+    response drops to 1/sqrt(2).
+
+    For degree up to 10 the empirical fit is typically accurate to much
+    better than 1%; higher errors occur only for the lowest m values
+    where the filter is defined (worst case: 4% at degree = 10, m = 6).
+    """
+    half_width: float = m + 0.5
+    passband_term: float = 6.352 * half_width / (degree + 1.379)
+    edge_term: float = (0.513 + 0.316 * degree) / half_width
+
+    return 1.0 / (passband_term - edge_term)
+
+
+def _extend_data(
+    data: NDArray[float64],
+    degree: int,
+    m: int,
+    fit_weights: NDArray[float64],
+) -> NDArray[float64]:
+    """
+    Extends the data by a weighted fit to a linear function (linear regression).
+    At each end, m extrapolated points are appended.
+
+    ``fit_weights`` (from _make_fit_weights) weight the near-edge points
+    used for each linear fit. NOTE(review): ``degree`` is currently unused
+    here — presumably kept for parity with the Java source; confirm.
+    """
+    extended_data: NDArray[float64] = zeros(len(data) + 2 * m, dtype=float64)
+    # Copy the original data into the middle of the extended array.
+    extended_data[m:m + len(data)] = data
+
+    lin_reg: LinearRegression = LinearRegression()
+
+    # Linear fit of first points and extrapolate
+    fit_length: int = min((len(fit_weights), len(data)))
+
+    p: int
+    for p in range(0, fit_length):
+        lin_reg.add_point(p, data[p], fit_weights[p])
+
+    slope: float
+    offset: float
+    slope, offset = lin_reg.calculate()
+
+    # p runs over negative positions, i.e., points before the first datum.
+    p = -1
+    while p >= -m:
+        extended_data[m + p] = offset + slope * p
+        p -= 1
+
+    # Linear fit of last points and extrapolate
+    lin_reg.clear()
+
+    # Fit the reversed tail so the same extrapolation formula applies.
+    for p in range(0, fit_length):
+        lin_reg.add_point(p, data[len(data) - 1 - p], fit_weights[p])
+
+    slope, offset = lin_reg.calculate()
+
+    p = -1
+    while p >= -m:
+        extended_data[len(data) + m - 1 - p] = offset + slope * p
+        p -= 1
+
+    return extended_data
+
+
+@dataclass
+class LinearRegression:
+    """
+    Accumulator for a weighted linear least-squares fit y = offset + slope*x.
+    Points are added one at a time via add_point() and the fitted line is
+    obtained from calculate().
+    """
+    sum_weights: float = 0.0
+    sum_x: float = 0.0
+    sum_y: float = 0.0
+    sum_xy: float = 0.0
+    sum_x2: float = 0.0
+
+    def clear(self):
+        # Reset all accumulators so the instance can be reused for a new fit.
+        self.sum_weights = 0.0
+        self.sum_x = 0.0
+        self.sum_y = 0.0
+        self.sum_xy = 0.0
+        self.sum_x2 = 0.0
+
+    def add_point(self, x: float, y: float, weight: float):
+        # Accumulate the weighted sums needed for the normal equations.
+        self.sum_weights += weight
+        self.sum_x += weight * x
+        self.sum_y += weight * y
+        self.sum_xy += weight * x * y
+        self.sum_x2 += weight * x**2
+
+    def calculate(self) -> Tuple[float, float]:
+        """
+        Return the (slope, offset) of the weighted least-squares line.
+
+        Returns (nan, nan) when no weight has been accumulated, and slope 0
+        when all points share a single x value. The previous port divided
+        by sum_weights (and by a zero variance) before checking, which
+        raised ZeroDivisionError in Python instead of producing the NaN/Inf
+        values the original Java float semantics relied on.
+        """
+        if not (self.sum_weights > 0):
+            # No (positively weighted) points: the fit is undefined.
+            return (nan, nan)
+
+        std_x2_times_N: float = self.sum_x2 - self.sum_x * self.sum_x * (
+            1 / self.sum_weights
+        )
+
+        slope: float
+        if std_x2_times_N == 0.0:
+            slope = 0.0  # slope 0 if only one x value
+        else:
+            slope = (
+                self.sum_xy - self.sum_x * self.sum_y * (1 / self.sum_weights)
+            ) / std_x2_times_N
+            if isnan(slope):
+                slope = 0.0  # slope 0 if only one x value
+
+        offset: float = (self.sum_y - slope * self.sum_x) / self.sum_weights
+
+        return (
+            slope,
+            offset,
+        )
+
+
+def _smooth(
+    data: NDArray[float64],
+    degree: int,
+    m: int,
+    is_MS1: bool = False,
+) -> NDArray[float64]:
+    """
+    Smooth ``data`` with an MS (or MS1) modified-sinc kernel of the given
+    degree and kernel halfwidth ``m``, using weighted linear extrapolation
+    at both ends to suppress boundary artifacts.
+
+    Raises a ValueError for unsupported degrees (odd, or outside 2..10)
+    or for a kernel halfwidth that is too small for the degree.
+    """
+    max_degree: int = 10
+    # Only even degrees up to 10 have tabulated kernels/corrections.
+    if not (2 <= degree <= max_degree and (degree & 0x1) == 0):
+        raise ValueError(
+            f"Only the following degrees are supported: {', '.join(map(str, range(2, max_degree+1, 2)))}"
+        )
+
+    min_m: int = degree // 2 + (1 if is_MS1 else 2)
+    if not (m >= min_m):
+        raise ValueError(
+            f"The kernel half-width must be greater than or equal to {min_m}"
+        )
+
+    coeffs: Optional[NDArray[float64]] = _get_coefficients(is_MS1, degree, m)
+    kernel: NDArray[float64] = _make_kernel(is_MS1, degree, m, coeffs)
+    fit_weights: NDArray[float64] = _make_fit_weights(is_MS1, degree, m)
+
+    # Pad the data with `radius` linearly extrapolated points per side so
+    # the convolution is defined at every original point.
+    radius: int = len(kernel) - 1
+    extended_data: NDArray[float64] = _extend_data(data, degree, radius, fit_weights)
+    extended_smoothed: NDArray[float64] = _smooth_except_boundaries(
+        extended_data,
+        kernel,
+    )
+
+    # Strip the padding again; the result has the same length as the input.
+    return extended_smoothed[radius:radius + len(data)]
+
+
+def _smooth_like_savgol(
+    data: NDArray[float64],
+    degree: int,
+    m: int,
+    is_MS1: bool = False,
+) -> NDArray[float64]:
+    """
+    Smooths the data in a way comparable to a traditional Savitzky-Golay
+    filter with the given parameters degree and m.
+
+    Parameters
+    ----------
+    data: NDArray[float64]
+        The data to be smoothed.
+
+    degree: int
+        The degree of the polynomial fit used in the Savitzky-Golay filter.
+        Must be an even number less than or equal to ten.
+
+    m: int
+        Half-width of the Savitzky-Golay kernel.
+
+    is_MS1: bool, optional
+        Use the MS1 variant, which has a smaller kernel size, at the cost
+        of reduced stopband suppression and more gradual cutoff for degree=2.
+        Otherwise, standard MS kernels are used.
+
+    Returns
+    -------
+    NDArray[float64]
+        The smoothed data.
+    """
+    # Determine the -3 dB bandwidth of the equivalent Savitzky-Golay
+    # filter, then pick the modified-sinc kernel halfwidth that matches it
+    # (note: m is re-bound to the MS kernel halfwidth here).
+    bandwidth: float = _savitzky_golay_bandwidth(degree, m)
+    m = _bandwidth_to_m(is_MS1, degree, bandwidth)
+
+    return _smooth(data, degree, m, is_MS1)
+
+
+def _test():
+    """
+    Regression test: smooth a short test sequence with the MS kernel
+    (degree=6, m=7) and compare against stored reference values
+    (presumably generated with the original Java implementation —
+    TODO confirm provenance). Raises ValueError on the first mismatch.
+    """
+    from numpy import (
+        array,
+        isclose,
+    )
+
+    data = array(
+        [0, 1, -2, 3, -4, 5, -6, 7, -8, 9, 10, 6, 3, 1, 0],
+        dtype=float64,
+    )
+    smoothed = _smooth(
+        data=data,
+        degree=6,
+        m=7,
+        is_MS1=False,
+    )
+
+    control = array(
+        [
+            0.1583588453161306,
+            0.11657466389491726,
+            -0.09224721042380793,
+            0.031656885544917315,
+            -0.054814729808335835,
+            -0.054362188355910813,
+            0.5105482655952578,
+            -0.5906786605713916,
+            -1.2192869459451745,
+            5.286105202110525,
+            10.461619519603234,
+            6.82674246410578,
+            2.4923674303784833,
+            1.0422038091960153,
+            0.032646599192913656,
+        ]
+    )
+
+    for s, c in zip(smoothed, control):
+        if not isclose(s, c):
+            raise ValueError(f"Expected {s=} to be almost equal to {c=}")
diff --git a/src/pyimpspec/analysis/zhit/smoothing/whittaker_henderson.py b/src/pyimpspec/analysis/zhit/smoothing/whittaker_henderson.py
new file mode 100644
index 0000000..afa79f0
--- /dev/null
+++ b/src/pyimpspec/analysis/zhit/smoothing/whittaker_henderson.py
@@ -0,0 +1,292 @@
+# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
+# Copyright 2024 pyimpspec developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
+# the LICENSES folder.
+
+# Whittaker-Henderson smoothing that was included in the article:
+# https://doi.org/10.1021/acsmeasuresciau.1c00054
+#
+# Ported from the Java source code included in the correction:
+# https://doi.org/10.1021/acsmeasuresciau.3c00017
+#
+# The original Java source code was written by Michael Schmid
+# and was licensed under GPLv3.
+
+from numpy import (
+ cos,
+ float64,
+ pi,
+ sqrt,
+ zeros,
+)
+from numpy.typing import NDArray
+from pyimpspec.typing.helpers import (
+ List,
+ _is_floating_array,
+ _is_integer,
+)
+
+
+def _bandwidth_to_lambda(order: int, bandwidth: float) -> float:
+    """
+    Return the lambda smoothing parameter for the given penalty-derivative
+    order and desired band width (the frequency where the response drops
+    to -3 dB, i.e., 1/sqrt(2)). Valid for points far from the boundaries
+    of the data.
+    """
+    if not (0 < bandwidth < 0.5):
+        raise ValueError(f"Expected 0 < {bandwidth=} < 0.5")
+
+    omega: float = 2 * pi * bandwidth
+    base: float = 2 * (1 - cos(omega))
+
+    # Raise base to the power `order` by repeated multiplication,
+    # yielding (2-2*cos(omega))^order as in the original implementation.
+    power: float = base
+    _step: int
+    for _step in range(1, order):
+        power *= base
+
+    return (sqrt(2) - 1) / power
+
+
+def _savitzky_golay_bandwidth(degree: int, m: int) -> float:
+    """
+    Calculates the bandwidth of a traditional Savitzky-Golay filter.
+
+    Returns the -3 dB-bandwidth of the Savitzky-Golay filter, i.e. the
+    frequency where the response is 1/sqrt(2). The sampling frequency
+    is defined as f = 1. For degree up to 10, the accuracy is typically
+    much better than 1%; higher errors occur only for the lowest m values
+    where the Savitzky-Golay filter is defined (worst case: 4% error at
+    degree = 10, m = 6).
+
+    NOTE: this helper is duplicated in modified_sinc.py; keep the two
+    definitions in sync if the empirical constants are ever updated.
+    """
+    return 1.0 / (
+        6.352 * (m + 0.5) / (degree + 1.379) - (0.513 + 0.316 * degree) / (m + 0.5)
+    )
+
+
+def _make_D_prime_D_matrix(order: int, size: int) -> List[List[float]]:
+    """
+    Creates a symmetric band-diagonal matrix D'*D where D is the n-th
+    derivative matrix and D' its transpose.
+
+    The matrix is stored band-by-band: out[d] holds the diagonal at
+    distance d from the main diagonal (out[0] is the main diagonal), so
+    the full matrix element A[i][i+d] equals out[d][i].
+    """
+    # Maximum penalty derivative order and corresponding coefficients
+    max_order: int = 5
+    if not (1 <= order <= max_order):
+        raise ValueError(
+            f"Expected the order to be a positive value less than or equal to {max_order}"
+        )
+
+    if not (size >= order):
+        raise ValueError(f"Expected {size=} >= {order=}")
+
+    # Finite-difference coefficients of the order-th derivative
+    # (rows of Pascal's triangle with alternating signs).
+    coeffs: List[int] = [
+        [-1, 1],
+        [1, -2, 1],
+        [-1, 3, -3, 1],
+        [1, -4, 6, -4, 1],
+        [-1, 5, -10, 10, -5, 1],
+    ][order - 1]
+
+    out: List[List[float]] = [[]] * (order + 1)
+
+    d: int  # Distance from diagonal
+    for d in range(0, order + 1):
+        out[d] = [0.0] * (size - d)
+
+    for d in range(0, order + 1):
+        length: int = len(out[d])
+
+        i: int
+        # Each band is symmetric about its middle, so only the first half
+        # is computed and mirrored onto the second half.
+        for i in range(0, (length + 1) // 2):
+            total: float = 0.0
+
+            # Sum over the overlap of the shifted coefficient vectors,
+            # truncated near the matrix edges.
+            j: int = max((0, i - length + len(coeffs) - d))
+            while j < i + 1 and j < len(coeffs) - d:
+                total += coeffs[j] * coeffs[j + d]
+                j += 1
+
+            out[d][i] = total
+            out[d][length - 1 - i] = total
+
+    return out
+
+
+def _times_lambda_plus_identity(b: List[List[float]], lmbd: float) -> List[List[float]]:
+    """
+    Scale the symmetric band-diagonal matrix b in place by lambda and add
+    the identity matrix, i.e., b <- 1 + lambda*b. Returns the modified b.
+    """
+    # Main diagonal (distance 0): scale and add 1 from the identity.
+    diagonal: List[float] = b[0]
+    i: int
+    for i in range(len(diagonal)):
+        diagonal[i] = 1.0 + diagonal[i] * lmbd
+
+    # Off-diagonal bands: only scaled by lambda.
+    d: int
+    for d in range(1, len(b)):
+        band: List[float] = b[d]
+        for i in range(len(band)):
+            band[i] = band[i] * lmbd
+
+    return b
+
+
+def _cholesky_L(b: List[List[float]]) -> List[List[float]]:
+    """
+    In-place Cholesky decomposition of the symmetric band-diagonal matrix
+    b; afterwards b holds the lower-left triangular factor L.
+
+    Raises a ValueError if the matrix is not positive definite.
+    """
+    n: int = len(b[0])
+    max_distance: int = len(b) - 1  # bandwidth of the matrix
+
+    i: int
+    j: int
+    k: int
+    for i in range(n):
+        for j in range(max(0, i - max_distance), i + 1):
+            acc: float = 0.0
+            for k in range(max(0, i - max_distance), j):
+                acc += b[i - k][k] * b[j - k][k]
+
+            if i == j:
+                diagonal_term: float = b[0][i] - acc
+                if not (diagonal_term > 0.0):
+                    raise ValueError("Matrix is not positive definite")
+                b[0][i] = sqrt(diagonal_term)
+            else:
+                distance: int = i - j
+                b[distance][j] = 1.0 / b[0][j] * (b[distance][j] - acc)
+
+    return b
+
+
+def _solve(b: List[List[float]], vec: NDArray[float64]) -> NDArray[float64]:
+    """
+    Solve b*y = vec by forward substitution and then b'*x = y by back
+    substitution, where b' is the transpose of b.
+
+    If b is a Cholesky factor of A, the returned x solves A*x = vec; for
+    data smoothing, x holds the smoothed data.
+    """
+    x: NDArray[float64] = zeros(vec.shape, dtype=float64)
+    n: int = len(b[0])
+    max_distance: int = len(b) - 1
+
+    # Forward substitution: b*y = vec.
+    i: int
+    j: int
+    for i in range(n):
+        acc: float = 0.0
+        for j in range(max(0, i - max_distance), i):
+            acc += b[i - j][j] * x[j]
+
+        x[i] = (vec[i] - acc) / b[0][i]
+
+    # Back substitution: b'*x = y, iterating from the last row upwards.
+    for i in range(n - 1, -1, -1):
+        acc = 0.0
+        for j in range(i + 1, min(i + max_distance + 1, n)):
+            acc += b[j - i][i] * x[j]
+
+        x[i] = (x[i] - acc) / b[0][i]
+
+    return x
+
+
+def _smooth(data: NDArray[float64], degree: int, m: int) -> NDArray[float64]:
+    """
+    Interface for using the Whittaker-Henderson smoothing algorithm with
+    parameters associated with Savitzky-Golay filters.
+
+    Minimizes
+
+    sum(f - y)^2 + sum(lambda * f'(p))
+
+    where y are the data, f are the smoothed data, and f'(p) is the p-th
+    derivative of the smoothed function evaluated numerically. In other words,
+    the filter imposes a penalty on the p-th derivative of the data, which is
+    taken as a measure of non-smoothness. Smoothing increases with increasing
+    value of lambda. The current implementation works up to p = 5; usually one
+    should use p = 2 or 3.
+
+    For points far from the boundaries of the data series, the frequency
+    response of the smoother is given by
+
+    1/(1+lambda*(2-2*cos(omega))^2p)
+
+    where n is the order of the penalized derivative and omega = 2*pi*f/fs,
+    with fs being the sampling frequency (reciprocal of the distance between
+    the data points).
+
+    Note that strong smoothing leads to numerical noise (which is smoothed
+    similar to the input data, thus not obvious in the output). For
+    lambda = 1e9, the noise is about 1e-6 times the magnitude of the data.
+    Since higher p values require a higher value of lambda for the same extent
+    of smoothing (the same band width), numerical noise is increasingly
+    bothersome for large p, not for p <= 2.
+
+    Parameters
+    ----------
+    data: NDArray[float64]
+        The data to be smoothed.
+
+    degree: int
+        The degree of the polynomial fit used in the Savitzky-Golay filter.
+
+    m: int
+        Half-width of the Savitzky-Golay kernel. Kernel size of the
+        Savitzky-Golay filter is 2*m + 1. Schmid recommends the following
+        limits:
+
+        degree | m
+        2      | 700
+        4      | 190
+        6      | 100
+        8      | 75
+
+    Returns
+    -------
+    NDArray[float64]
+        The smoothed data.
+    """
+    if not _is_floating_array(data):
+        raise TypeError(f"Expected an array of floats instead of {data=}")
+    elif not (len(data.shape) == 1):
+        raise ValueError(f"Expected {len(data.shape)=} == 1")
+
+    if not _is_integer(degree):
+        raise TypeError(f"Expected an integer instead of {degree=}")
+
+    if not _is_integer(m):
+        raise TypeError(f"Expected an integer instead of {m=}")
+
+    # Penalty derivative order p derived from the Savitzky-Golay degree.
+    order: int = degree // 2 + 1
+    matrix: List[List[float]] = _make_D_prime_D_matrix(order, len(data))
+
+    # Match the -3 dB bandwidth of the equivalent Savitzky-Golay filter.
+    bandwidth: float = _savitzky_golay_bandwidth(degree, m)
+    lmbd: float = _bandwidth_to_lambda(order, bandwidth)
+    matrix = _times_lambda_plus_identity(matrix, lmbd)
+
+    # Solve (1 + lambda*D'D) x = data via banded Cholesky decomposition.
+    matrix = _cholesky_L(matrix)
+    if not (len(data) == len(matrix[0])):
+        raise ValueError(f"Expected {len(data)=} == {len(matrix[0])=}")
+
+    return _solve(matrix, data)
diff --git a/src/pyimpspec/analysis/zhit/weights.py b/src/pyimpspec/analysis/zhit/weights.py
index c7dca8e..9cfc035 100644
--- a/src/pyimpspec/analysis/zhit/weights.py
+++ b/src/pyimpspec/analysis/zhit/weights.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -48,18 +48,22 @@ def _initialize_window_functions():
from scipy.signal import windows as scipy_windows
global _WINDOW_FUNCTIONS
+
name: str
for name in dir(scipy_windows):
if name.startswith("_"):
continue
elif not callable(getattr(scipy_windows, name)):
continue
+
func: Callable = getattr(scipy_windows, name)
sig: Signature = signature(func)
+
if not ("M" in sig.parameters and "sym" in sig.parameters):
continue
elif len(sig.parameters) > 2:
continue
+
_WINDOW_FUNCTIONS[name] = func
@@ -76,10 +80,12 @@ def _generate_weights(
f"Unsupported window function: '{window}'! Valid values include:\n- "
+ "\n- ".join(sorted(_WINDOW_FUNCTIONS.keys()))
)
+
weights: NDArray[float64] = zeros(log_f.shape, dtype=float64)
min_log_f: float = center - width / 2
max_log_f: float = center + width / 2
num_points: int = 10 * int(ceil(max_log_f) - floor(min_log_f)) + 1
+
x: List[float] = [
_
for _ in log(
@@ -95,23 +101,30 @@ def _generate_weights(
x.insert(0, min_log_f)
if max_log_f not in x:
x.append(max_log_f)
+
weights_interpolator: Akima1DInterpolator = Akima1DInterpolator(
x,
_WINDOW_FUNCTIONS[window](M=len(x)),
)
+
i: int
lf: float
for i, lf in enumerate(log_f):
if not (min_log_f <= lf <= max_log_f):
continue
weights[i] = weights_interpolator(lf)
- assert len(weights) == len(log_f)
+
+ if not (len(weights) == len(log_f)):
+ raise ValueError(f"Expected {len(weights)=} == {len(log_f)=}")
+
indices = where(weights < 0.0)[0]
if indices.size > 0:
weights[indices] = 0.0
+
indices = where(weights > 1.0)[0]
if indices.size > 0:
weights[indices] = 1.0
+
return weights
@@ -124,17 +137,22 @@ def _generate_window_options(
prog: Progress,
) -> Dict[str, NDArray[float64]]:
prog.set_message("Generating weights")
+
if len(_WINDOW_FUNCTIONS) == 0:
_initialize_window_functions()
window_options: Dict[str, NDArray[float64]] = {}
+
if weights is not None:
window_options["custom"] = weights
prog.increment()
+
elif window == "auto":
for window in _WINDOW_FUNCTIONS:
window_options[window] = _generate_weights(log_f, window, center, width)
prog.increment()
+
else:
window_options[window] = _generate_weights(log_f, window, center, width)
prog.increment()
+
return window_options
diff --git a/src/pyimpspec/circuit/__init__.py b/src/pyimpspec/circuit/__init__.py
index 7a8a151..4bc09e7 100644
--- a/src/pyimpspec/circuit/__init__.py
+++ b/src/pyimpspec/circuit/__init__.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -58,7 +58,9 @@ def parse_cdc(cdc: str) -> Circuit:
-------
Circuit
"""
- assert isinstance(cdc, str), cdc
+ if not isinstance(cdc, str):
+ raise TypeError(f"Expected a string instead of {cdc=}")
+
return Parser().process(cdc)
@@ -86,11 +88,17 @@ def simulate_spectrum(
-------
DataSet
"""
- assert isinstance(circuit, Circuit), circuit
- assert isinstance(label, str), label
+ if not isinstance(circuit, Circuit):
+ raise TypeError(f"Expected a Circuit instead of {circuit=}")
+
+ if not isinstance(label, str):
+ raise TypeError(f"Expected a string instead of {label=}")
+
if frequencies is None or len(frequencies) == 0:
frequencies = logspace(5, -2, 71)
elif not isinstance(frequencies, ndarray):
frequencies = array(frequencies, dtype=Frequency)
+
Z: ComplexImpedances = circuit.get_impedances(frequencies)
+
return DataSet(frequencies=frequencies, impedances=Z, label=label)
diff --git a/src/pyimpspec/circuit/base.py b/src/pyimpspec/circuit/base.py
index 0963401..77eca1c 100644
--- a/src/pyimpspec/circuit/base.py
+++ b/src/pyimpspec/circuit/base.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -38,13 +38,10 @@
delete,
fromiter,
indices as array_indices,
- integer,
isinf,
isnan,
isneginf,
isposinf,
- issubdtype,
- ndarray,
unique,
where,
zeros,
@@ -63,6 +60,12 @@
Frequencies,
Indices,
)
+from pyimpspec.typing.helpers import (
+ _cast_to_floating_array,
+ _is_boolean,
+ _is_floating_array,
+ _is_integer,
+)
from pyimpspec.exceptions import (
InfiniteImpedance,
InfiniteLimit,
@@ -73,24 +76,24 @@
def _calculate_limit(obj, f: Frequency) -> ComplexImpedance:
- assert hasattr(obj, "to_sympy") and callable(
- obj.to_sympy
- ), f"The provided object ({obj}) does not have a 'to_sympy' method!"
expr: Expr = obj.to_sympy(substitute=True)
symbols: Set[Basic] = expr.free_symbols
+
Z: ComplexImpedance
if len(symbols) == 0:
Z = ComplexImpedance(expr)
elif len(symbols) == 1:
Z = ComplexImpedance(limit(expr, symbols.pop(), f))
else:
- raise InvalidEquation("Invalid impedance expression! Too many free symbols!")
+ raise InvalidEquation("Invalid impedance expression! Too many free symbols")
+
if isinf(Z):
raise InfiniteLimit(
f"The f -> {f} limit is not finite for the following expression when values are substituted: {str(obj.to_sympy())}"
)
elif isnan(Z):
raise NotANumberImpedance()
+
return Z.astype(ComplexImpedance)
@@ -98,10 +101,15 @@ def _calculate_impedances(
obj: Union["Element", "Container", "Connection"],
f: Frequencies,
) -> ComplexImpedances:
- assert len(f.shape) == 1, f.shape
- assert min(f) >= 0.0
+ if not _is_floating_array(f):
+ f = _cast_to_floating_array(f)
+
+ if min(f) < 0.0:
+ raise ValueError("Negative frequencies are not supported")
+
parameters: Dict[str, float]
subcircuits: Dict[str, Optional[Connection]]
+
func: Callable
if isinstance(obj, Container):
parameters = obj.get_values()
@@ -114,8 +122,10 @@ def _calculate_impedances(
parameters = {}
func = lambda _: obj._impedance(_)
else:
- raise NotImplementedError(f"Unsupported object: '{type(obj)}'!")
+ raise NotImplementedError(f"Unsupported object: '{type(obj)}'")
+
Z: ComplexImpedances = zeros(f.shape, dtype=ComplexImpedance)
+
indices: Indices = array_indices(Z.shape)[0]
limit_indices: Indices = unique(
concatenate(
@@ -126,6 +136,7 @@ def _calculate_impedances(
axis=0,
)
)
+
if limit_indices.size > 0:
Z[limit_indices] = fromiter(
(_calculate_limit(obj, _) for _ in f[limit_indices]),
@@ -133,12 +144,15 @@ def _calculate_impedances(
count=limit_indices.size,
)
indices = delete(indices, limit_indices)
+
if indices.size > 0:
Z[indices] = func(f[indices])
+
if isinf(Z).any():
- raise InfiniteImpedance()
+ raise InfiniteImpedance("Encountered an infinite impedance")
elif isnan(Z).any():
- raise NotANumberImpedance()
+ raise NotANumberImpedance("Encountered an impedance that is not a number (NaN)")
+
return Z.astype(ComplexImpedance)
@@ -167,8 +181,10 @@ def __init__(self, **kwargs):
"Invalid parameter (or subcircuit) key detected! The valid keys are: "
+ ", ".join(self._valid_kwargs_keys)
)
+
self._label: str = ""
self._parameter_value: Dict[str, float] = {}
+
key: str
value: float
for key, value in self._parameter_default_value.items():
@@ -176,6 +192,7 @@ def __init__(self, **kwargs):
self._parameter_value[key] = value
else:
self._parameter_value[key] = float(kwargs[key])
+
self._parameter_lower_limit: Dict[
str, float
] = self._parameter_default_lower_limit.copy()
@@ -186,9 +203,10 @@ def __init__(self, **kwargs):
def __copy__(self) -> "Element":
return (
- type(self)(**self.get_values())
+ type(self)()
.set_lower_limits(**self.get_lower_limits())
.set_upper_limits(**self.get_upper_limits())
+ .set_values(**self.get_values())
.set_fixed(**self.are_fixed())
.set_label(self._label)
)
@@ -196,9 +214,11 @@ def __copy__(self) -> "Element":
def __deepcopy__(self, memo: dict) -> "Element":
ident: int = id(self)
copy: Optional["Element"] = memo.get(ident)
+
if copy is None:
copy = self.__copy__()
memo[ident] = copy
+
return copy
def __repr__(self) -> str:
@@ -208,7 +228,7 @@ def __str__(self) -> str:
return self.get_description()
@classmethod
- def get_extended_description(Class) -> str:
+ def get_extended_description(cls) -> str:
"""
Get an extended description of this element.
@@ -216,11 +236,15 @@ def get_extended_description(Class) -> str:
-------
str
"""
- assert isinstance(Class.__doc__, str)
- return Class.__doc__
+ if not isinstance(cls.__doc__, str):
+ raise TypeError(
+ f"Expected the element's description to be a string instead of {cls.__doc__=}"
+ )
+
+ return cls.__doc__
@classmethod
- def get_description(Class) -> str:
+ def get_description(cls) -> str:
"""
Get a brief description of this element.
@@ -228,10 +252,10 @@ def get_description(Class) -> str:
-------
str
"""
- return f"{Class._symbol}: {Class._name}" # type: ignore
+ return f"{cls._symbol}: {cls._name}" # type: ignore
@classmethod
- def get_default_values(Class, *args, **kwargs) -> Dict[str, float]:
+ def get_default_values(cls, *args, **kwargs) -> Dict[str, float]:
"""
Get the default values for this element's parameters as a dictionary.
@@ -240,16 +264,18 @@ def get_default_values(Class, *args, **kwargs) -> Dict[str, float]:
Dict[str, float]
"""
if not (args or kwargs):
- return Class._parameter_default_value.copy()
+ return cls._parameter_default_value.copy()
+
results: Dict[str, float] = {}
+
key: Any
for key in set(list(args) + list(kwargs.keys())):
- assert isinstance(key, str), key
- results[key] = Class._parameter_default_value[key]
+ results[key] = cls._parameter_default_value[key]
+
return results
@classmethod
- def get_default_value(Class, key: str) -> float:
+ def get_default_value(cls, key: str) -> float:
"""
Get the default value for a specific parameter.
@@ -262,10 +288,50 @@ def get_default_value(Class, key: str) -> float:
-------
float
"""
- return Class.get_default_values(key)[key]
+ return cls.get_default_values(key)[key]
+
+ @classmethod
+ def set_default_values(cls, *args, **kwargs):
+ """
+ Set the default values for this element's parameters.
+
+ Parameters
+ ----------
+ *args
+ Pairs of string keys and numeric values corresponding to parameters (e.g., `set_values("R", 1.0, "Y", 1e-9)`).
+
+ **kwargs
+ String keys and numeric values corresponding to parameters (e.g., `set_values(R=1.0, Y=1e-9)`).
+ """
+ pairs: dict = kwargs.copy()
+
+ key: Any
+ value: Any
+ if args:
+ if len(args) % 2 != 0:
+ raise ValueError(f"Expected pairs of arguments instead of {args=}")
+
+ args_list: List[Any] = list(args)
+ while args_list:
+ key = args_list.pop(0)
+ value = args_list.pop(0)
+ if key in pairs:
+ raise KeyError(
+ f"The key-value pair {key=} was already defined as a keyword argument"
+ )
+ else:
+ pairs[key] = value
+
+ for key, value in pairs.items():
+ if key not in cls._parameter_default_value:
+ raise KeyError(
+ f"Expected a key that exists in {cls._parameter_default_value.keys()} instead of {key=}"
+ )
+
+ cls._parameter_default_value[key] = float(value)
@classmethod
- def get_symbol(Class) -> str:
+ def get_symbol(cls) -> str:
"""
Get the symbol for this element.
The symbol is used to represent this type of element in a circuit description code.
@@ -274,7 +340,7 @@ def get_symbol(Class) -> str:
-------
str
"""
- return Class._symbol
+ return cls._symbol
def _sympy(
self,
@@ -300,14 +366,20 @@ def to_sympy(self, substitute: bool = False, identifier: int = -1) -> Expr:
-------
|Expr|
"""
- assert isinstance(substitute, bool), substitute
- assert isinstance(identifier, int), identifier
+ if not _is_boolean(substitute):
+ raise TypeError(f"Expected a boolean instead of {substitute=}")
+
+ if not _is_integer(identifier):
+ raise TypeError(f"Expected an integer instead of {identifier=}")
+
substitutions: Dict[str, Union[str, float]] = {}
values: Dict[str, float] = self.get_values()
+
key: str
value: float
for key, value in values.items():
repl: Union[str, float]
+
if not substitute:
if self._label != "":
repl = f"{key}_{self._label}"
@@ -315,13 +387,18 @@ def to_sympy(self, substitute: bool = False, identifier: int = -1) -> Expr:
repl = f"{key}_{identifier}"
else:
repl = f"{key}"
+
elif isposinf(value):
repl = "oo"
+
elif isneginf(value):
repl = "-oo"
+
else:
repl = value
+
substitutions[key] = repl
+
return self._sympy(
substitute=substitute,
identifier=identifier,
@@ -361,13 +438,24 @@ def set_label(self, label: str) -> "Element":
-------
Element
"""
- assert isinstance(label, str), f"{label=}"
+ if not isinstance(label, str):
+ raise TypeError(f"Expected a string instead of {label=}")
+
label = label.strip()
+
if label != "":
- assert all(map(str.isascii, label)) and not all(
- map(str.isdigit, label)
- ), label
+ if not all(map(str.isascii, label)):
+ raise ValueError(
+ f"Expected the label to only contain ASCII characters instead of {label=}"
+ )
+
+ if all(map(str.isdigit, label)):
+ raise ValueError(
+ f"Expected the label to not only contain digits instead of {label=}"
+ )
+
self._label = label
+
return self
def get_name(self) -> str:
@@ -380,6 +468,7 @@ def get_name(self) -> str:
"""
if self._label == "":
return self.get_symbol()
+
return f"{self.get_symbol()}_{self._label}"
def serialize(self, decimals: int = 12) -> str:
@@ -399,32 +488,44 @@ def to_string(self, decimals: int = -1) -> str:
-------
str
"""
- assert issubdtype(type(decimals), integer), decimals
+ if not _is_integer(decimals):
+ raise TypeError(f"Expected an integer instead of {decimals=}")
+
if decimals < 0:
return self.get_symbol()
+
lower_limits: Dict[str, float] = self.get_lower_limits()
upper_limits: Dict[str, float] = self.get_upper_limits()
fixed_values: Dict[str, bool] = self.are_fixed()
parameters: List[str] = []
+
+ symbol: str
+ value: float
for symbol, value in self.get_values().items():
lower: float = lower_limits[symbol]
upper: float = upper_limits[symbol]
fixed: bool = fixed_values[symbol]
string: str = f"{symbol}=" + (f"%.{decimals}E") % value
+
if fixed:
string += "F"
+
if isinf(lower):
string += "/inf"
else:
string += (f"/%.{decimals}E") % lower
+
if isinf(upper):
string += "/inf"
else:
string += (f"/%.{decimals}E") % upper
+
parameters.append(string)
+
cdc: str = self.get_symbol() + "{" + ",".join(parameters)
if self._label != "":
cdc += f":{self._label}"
+
return cdc + "}"
def reset_parameters(self, *args, **kwargs):
@@ -478,15 +579,17 @@ def are_fixed(self, *args, **kwargs) -> Dict[str, bool]:
"""
if not (args or kwargs):
return self._parameter_fixed.copy()
+
results: Dict[str, bool] = {}
+
key: Any
for key in set(list(args) + list(kwargs.keys())):
- assert isinstance(key, str), key
results[key] = self._parameter_fixed[key]
+
return results
@classmethod
- def are_fixed_by_default(Class, *args, **kwargs) -> Dict[str, bool]:
+ def are_fixed_by_default(cls, *args, **kwargs) -> Dict[str, bool]:
"""
Get a dictionary that maps parameter keys to whether or not those parameters have fixed values by default.
@@ -504,12 +607,14 @@ def are_fixed_by_default(Class, *args, **kwargs) -> Dict[str, bool]:
Dict[str, bool]
"""
if not (args or kwargs):
- return Class._parameter_default_fixed.copy()
+ return cls._parameter_default_fixed.copy()
+
results: Dict[str, bool] = {}
+
key: Any
for key in set(list(args) + list(kwargs.keys())):
- assert isinstance(key, str), key
- results[key] = Class._parameter_default_fixed[key]
+ results[key] = cls._parameter_default_fixed[key]
+
return results
def is_fixed(self, key: str) -> bool:
@@ -528,7 +633,7 @@ def is_fixed(self, key: str) -> bool:
return self.are_fixed(key)[key]
@classmethod
- def is_fixed_by_default(Class, key: str) -> bool:
+ def is_fixed_by_default(cls, key: str) -> bool:
"""
Get whether or not a specific parameter has a fixed value by default.
@@ -541,7 +646,7 @@ def is_fixed_by_default(Class, key: str) -> bool:
-------
bool
"""
- return Class.are_fixed_by_default(key)[key]
+ return cls.are_fixed_by_default(key)[key]
def set_fixed(self, *args, **kwargs) -> "Element":
"""
@@ -559,23 +664,36 @@ def set_fixed(self, *args, **kwargs) -> "Element":
-------
Element
"""
+ pairs: dict = kwargs.copy()
+
key: Any
value: Any
if args:
- assert len(args) % 2 == 0
+ if len(args) % 2 != 0:
+ raise ValueError(f"Expected pairs of arguments instead of {args=}")
+
args_list: List[Any] = list(args)
while args_list:
key = args_list.pop(0)
value = args_list.pop(0)
- assert isinstance(key, str), key
- assert key in self._parameter_fixed, key
- assert isinstance(value, bool), value
- self._parameter_fixed[key] = value
- for key, value in kwargs.items():
- assert isinstance(key, str), key
- assert key in self._parameter_fixed, key
- assert isinstance(value, bool), value
+ if key in pairs:
+ raise KeyError(
+ f"The key-value pair {key=} was already defined as a keyword argument"
+ )
+ else:
+ pairs[key] = value
+
+ for key, value in pairs.items():
+ if key not in self._parameter_fixed:
+ raise KeyError(
+ f"Expected a key that exists in {self._parameter_fixed.keys()} instead of {key=}"
+ )
+
+ if not _is_boolean(value):
+ raise TypeError(f"Expected a boolean instead of {value=}")
+
self._parameter_fixed[key] = value
+
return self
def get_lower_limits(self, *args, **kwargs) -> Dict[str, float]:
@@ -597,11 +715,13 @@ def get_lower_limits(self, *args, **kwargs) -> Dict[str, float]:
"""
if not (args or kwargs):
return self._parameter_lower_limit.copy()
+
results: Dict[str, float] = {}
+
key: Any
for key in set(list(args) + list(kwargs.keys())):
- assert isinstance(key, str), key
results[key] = self._parameter_lower_limit[key]
+
return results
def get_lower_limit(self, key: str) -> float:
@@ -620,7 +740,7 @@ def get_lower_limit(self, key: str) -> float:
return self.get_lower_limits(key)[key]
@classmethod
- def get_default_lower_limits(Class, *args, **kwargs) -> Dict[str, float]:
+ def get_default_lower_limits(cls, *args, **kwargs) -> Dict[str, float]:
"""
Get a dictionary that maps parameter keys to their default lower limits.
@@ -638,16 +758,18 @@ def get_default_lower_limits(Class, *args, **kwargs) -> Dict[str, float]:
Dict[str, float]
"""
if not (args or kwargs):
- return Class._parameter_default_lower_limit.copy()
+ return cls._parameter_default_lower_limit.copy()
+
results: Dict[str, float] = {}
+
key: Any
for key in set(list(args) + list(kwargs.keys())):
- assert isinstance(key, str), key
- results[key] = Class._parameter_default_lower_limit[key]
+ results[key] = cls._parameter_default_lower_limit[key]
+
return results
@classmethod
- def get_default_lower_limit(Class, key: str) -> float:
+ def get_default_lower_limit(cls, key: str) -> float:
"""
Get the default lower limit for a specific parameter.
@@ -660,11 +782,12 @@ def get_default_lower_limit(Class, key: str) -> float:
-------
float
"""
- return Class.get_default_lower_limits(key)[key]
+ return cls.get_default_lower_limits(key)[key]
def set_lower_limits(self, *args, **kwargs) -> "Element":
"""
Set lower limits for parameters.
+ Lower limits are used during circuit fitting.
Parameters
----------
@@ -678,36 +801,42 @@ def set_lower_limits(self, *args, **kwargs) -> "Element":
-------
Element
"""
+ pairs: dict = kwargs.copy()
+
key: Any
value: Any
if args:
- assert len(args) % 2 == 0
+ if len(args) % 2 != 0:
+ raise ValueError(f"Expected pairs of arguments instead of {args=}")
+
args_list: List[Any] = list(args)
while args_list:
key = args_list.pop(0)
- assert isinstance(key, str), f"Expected a string key but got '{key}'!"
- assert (
- key in self._parameter_lower_limit
- ), f"Invalid parameter key: '{key}'!"
- value = float(args_list.pop(0))
- assert (
- value < self._parameter_upper_limit[key]
- ), f"Lower limit must be less than the upper limit ({self._parameter_upper_limit[key]})!"
- if self._parameter_value[key] < value:
- self._parameter_value[key] = value
- self._parameter_lower_limit[key] = value
- for key, value in kwargs.items():
- assert isinstance(key, str), f"Expected a string key but got '{key}'!"
- assert (
- key in self._parameter_lower_limit
- ), f"Invalid parameter key: '{key}'!"
+ value = args_list.pop(0)
+ if key in pairs:
+ raise KeyError(
+ f"The key-value pair {key=} was already defined as a keyword argument"
+ )
+ else:
+ pairs[key] = value
+
+ for key, value in pairs.items():
+ if key not in self._parameter_lower_limit:
+ raise KeyError(
+ f"Expected a key that exists in {self._parameter_lower_limit.keys()} instead of {key=}"
+ )
+
value = float(value)
- assert (
- value < self._parameter_upper_limit[key]
- ), f"Lower limit must be less than the upper limit ({self._parameter_upper_limit[key]})!"
+ if value >= self._parameter_upper_limit[key]:
+ raise ValueError(
+ f"Expected the new value of {key=} ({value}) to be less than the current upper limit of {self._parameter_upper_limit[key]}"
+ )
+
if self._parameter_value[key] < value:
self._parameter_value[key] = value
+
self._parameter_lower_limit[key] = value
+
return self
def get_upper_limits(self, *args, **kwargs) -> Dict[str, float]:
@@ -729,11 +858,13 @@ def get_upper_limits(self, *args, **kwargs) -> Dict[str, float]:
"""
if not (args or kwargs):
return self._parameter_upper_limit.copy()
+
results: Dict[str, float] = {}
+
key: Any
for key in set(list(args) + list(kwargs.keys())):
- assert isinstance(key, str), key
results[key] = self._parameter_upper_limit[key]
+
return results
def get_upper_limit(self, key: str) -> float:
@@ -752,7 +883,7 @@ def get_upper_limit(self, key: str) -> float:
return self.get_upper_limits(key)[key]
@classmethod
- def get_default_upper_limits(Class, *args, **kwargs) -> Dict[str, float]:
+ def get_default_upper_limits(cls, *args, **kwargs) -> Dict[str, float]:
"""
Get a dictionary that maps parameter keys to their default upper limits.
@@ -770,16 +901,18 @@ def get_default_upper_limits(Class, *args, **kwargs) -> Dict[str, float]:
Dict[str, float]
"""
if not (args or kwargs):
- return Class._parameter_default_upper_limit.copy()
+ return cls._parameter_default_upper_limit.copy()
+
results: Dict[str, float] = {}
+
key: Any
for key in set(list(args) + list(kwargs.keys())):
- assert isinstance(key, str), key
- results[key] = Class._parameter_default_upper_limit[key]
+ results[key] = cls._parameter_default_upper_limit[key]
+
return results
@classmethod
- def get_default_upper_limit(Class, key: str) -> float:
+ def get_default_upper_limit(cls, key: str) -> float:
"""
Get the default upper limit for a specific parameter.
@@ -792,11 +925,12 @@ def get_default_upper_limit(Class, key: str) -> float:
-------
float
"""
- return Class.get_default_upper_limits(key)[key]
+ return cls.get_default_upper_limits(key)[key]
def set_upper_limits(self, *args, **kwargs) -> "Element":
"""
Set upper limits for parameters.
+ Upper limits are used during circuit fitting.
Parameters
----------
@@ -810,36 +944,42 @@ def set_upper_limits(self, *args, **kwargs) -> "Element":
-------
Element
"""
+ pairs: dict = kwargs.copy()
+
key: Any
value: Any
if args:
- assert len(args) % 2 == 0
+ if len(args) % 2 != 0:
+ raise ValueError(f"Expected pairs of arguments instead of {args=}")
+
args_list: List[Any] = list(args)
while args_list:
key = args_list.pop(0)
- assert isinstance(key, str), f"Expected a string key but got '{key}'!"
- assert (
- key in self._parameter_upper_limit
- ), f"Invalid parameter key: '{key}'!"
- value = float(args_list.pop(0))
- assert (
- value > self._parameter_lower_limit[key]
- ), f"Upper limit must be greater than the lower limit ({self._parameter_lower_limit[key]})!"
- if self._parameter_value[key] > value:
- self._parameter_value[key] = value
- self._parameter_upper_limit[key] = value
- for key, value in kwargs.items():
- assert isinstance(key, str), f"Expected a string key but got '{key}'!"
- assert (
- key in self._parameter_upper_limit
- ), f"Invalid parameter key: '{key}'!"
+ value = args_list.pop(0)
+ if key in pairs:
+ raise KeyError(
+ f"The key-value pair {key=} was already defined as a keyword argument"
+ )
+ else:
+ pairs[key] = value
+
+ for key, value in pairs.items():
+ if key not in self._parameter_upper_limit:
+ raise KeyError(
+ f"Expected a key that exists in {self._parameter_upper_limit.keys()} instead of {key=}"
+ )
+
value = float(value)
- assert (
- value > self._parameter_lower_limit[key]
- ), f"Upper limit must be greater than the lower limit ({self._parameter_lower_limit[key]})!"
+ if value <= self._parameter_lower_limit[key]:
+ raise ValueError(
+ f"Expected the new value of {key=} ({value}) to be greater than the current lower limit of {self._parameter_lower_limit[key]}"
+ )
+
if self._parameter_value[key] > value:
self._parameter_value[key] = value
+
self._parameter_upper_limit[key] = value
+
return self
def get_values(self, *args, **kwargs) -> Dict[str, float]:
@@ -861,11 +1001,13 @@ def get_values(self, *args, **kwargs) -> Dict[str, float]:
"""
if not (args or kwargs):
return self._parameter_value.copy()
+
results: Dict[str, float] = {}
+
key: Any
for key in set(list(args) + list(kwargs.keys())):
- assert isinstance(key, str), key
results[key] = self._parameter_value[key]
+
return results
def get_value(self, key: str) -> float:
@@ -899,25 +1041,37 @@ def set_values(self, *args, **kwargs) -> "Element":
-------
Element
"""
+ pairs: dict = kwargs.copy()
+
key: Any
value: Any
if args:
- assert len(args) % 2 == 0
+ if len(args) % 2 != 0:
+ raise ValueError(f"Expected pairs of arguments instead of {args=}")
+
args_list: List[Any] = list(args)
while args_list:
key = args_list.pop(0)
value = args_list.pop(0)
- assert isinstance(key, str), key
- assert key in self._parameter_value, key
- self._parameter_value[key] = float(value)
- for key, value in kwargs.items():
- assert isinstance(key, str), key
- assert key in self._parameter_value, key
+ if key in pairs:
+ raise KeyError(
+ f"The key-value pair {key=} was already defined as a keyword argument"
+ )
+ else:
+ pairs[key] = value
+
+ for key, value in pairs.items():
+ if key not in self._parameter_value:
+ raise KeyError(
+ f"Expected a key that exists in {self._parameter_value.keys()} instead of {key=}"
+ )
+
self._parameter_value[key] = float(value)
+
return self
@classmethod
- def get_units(Class, *args, **kwargs) -> Dict[str, str]:
+ def get_units(cls, *args, **kwargs) -> Dict[str, str]:
"""
Get a dictionary that maps parameter keys to their corresponding units.
@@ -935,16 +1089,18 @@ def get_units(Class, *args, **kwargs) -> Dict[str, str]:
Dict[str, str]
"""
if not (args or kwargs):
- return Class._parameter_unit.copy()
+ return cls._parameter_unit.copy()
+
results: Dict[str, str] = {}
+
key: Any
for key in set(list(args) + list(kwargs.keys())):
- assert isinstance(key, str), key
- results[key] = Class._parameter_unit[key]
+ results[key] = cls._parameter_unit[key]
+
return results
@classmethod
- def get_unit(Class, key: str) -> str:
+ def get_unit(cls, key: str) -> str:
"""
Get the unit for a specific parameter.
@@ -957,10 +1113,10 @@ def get_unit(Class, key: str) -> str:
-------
str
"""
- return Class.get_units(key)[key]
+ return cls.get_units(key)[key]
@classmethod
- def get_value_descriptions(Class, *args, **kwargs) -> Dict[str, str]:
+ def get_value_descriptions(cls, *args, **kwargs) -> Dict[str, str]:
"""
Get a dictionary that maps parameter keys to their corresponding descriptions.
@@ -978,16 +1134,18 @@ def get_value_descriptions(Class, *args, **kwargs) -> Dict[str, str]:
Dict[str, str]
"""
if not (args or kwargs):
- return Class._parameter_description.copy()
+ return cls._parameter_description.copy()
+
results: Dict[str, str] = {}
+
key: Any
for key in set(list(args) + list(kwargs.keys())):
- assert isinstance(key, str), key
- results[key] = Class._parameter_description[key]
+ results[key] = cls._parameter_description[key]
+
return results
@classmethod
- def get_value_description(Class, key: str) -> str:
+ def get_value_description(cls, key: str) -> str:
"""
Get the description for a specific parameter.
@@ -1000,7 +1158,7 @@ def get_value_description(Class, key: str) -> str:
-------
str
"""
- return Class.get_value_descriptions(key)[key]
+ return cls.get_value_descriptions(key)[key]
@abstractmethod
def _impedance(self, f: Frequencies, **kwargs) -> ComplexImpedances:
@@ -1035,16 +1193,18 @@ def get_impedances(self, frequencies: Frequencies) -> ComplexImpedances:
-------
|ComplexImpedances|
"""
- assert isinstance(frequencies, ndarray), frequencies
return _calculate_impedances(self, frequencies)
class Connection(ABC):
def __init__(self, elements: List[Union[Element, "Connection"]]):
- assert isinstance(elements, list), elements
- assert all(
+ if not (isinstance(elements, list) and all(
map(lambda _: isinstance(_, Connection) or isinstance(_, Element), elements)
- )
+ )):
+ raise TypeError(
+ f"Expected a list of Element and/or Connection instances instead of {elements=}"
+ )
+
self._elements: List[Union[Element, "Connection"]] = elements
def __copy__(self) -> "Connection":
@@ -1053,39 +1213,66 @@ def __copy__(self) -> "Connection":
def __deepcopy__(self, memo: dict) -> "Connection":
ident: int = id(self)
copy: Optional["Connection"] = memo.get(ident)
+
if copy is None:
copy = type(self)([_.__deepcopy__(memo) for _ in self._elements])
memo[ident] = copy
+
return copy
+ def __iter__(self) -> List[Union[Element, "Connection"]]:
+ return iter(self._elements)
+
def __repr__(self) -> str:
return f"TODO ({hex(id(self))})"
def __len__(self) -> int:
return len(self._elements)
- def __contains__(self, element_or_connection: Union[Element, "Connection"]) -> bool:
- ec: Union[Element, "Connection"]
- for ec in self.get_elements():
- if ec == element_or_connection:
+ def __contains__(self, item: Union[Element, "Connection"]) -> bool:
+ element_or_connection: Union[Element, "Connection"]
+ for element_or_connection in self._elements:
+ if element_or_connection is item:
return True
- elif isinstance(ec, Container):
- if element_or_connection in ec:
+ elif isinstance(element_or_connection, Connection):
+ if item in element_or_connection:
return True
+ elif isinstance(element_or_connection, Container):
+ if item in element_or_connection:
+ return True
+
return False
+ def _get_all_items_recursive(self) -> List[Union[Element, "Connection"]]:
+ items: List[Union[Element, "Connection"]] = []
+
+ element_or_connection: Union[Element, "Connection"]
+ for element_or_connection in self._elements:
+ if isinstance(element_or_connection, Connection):
+ items.extend(element_or_connection._get_all_items_recursive())
+ else:
+ items.append(element_or_connection)
+
+ return items
+
def _get_elements_recursive(self) -> List[Element]:
connection_type: Type["Connection"] = type(self).__bases__[0]
- queue: List[Union[Element, "Connection"]] = self.get_elements(flattened=True)
+ queue: List[Union[Element, "Connection"]] = self._get_all_items_recursive()
elements: List[Element] = []
+
while queue:
element: Union[Element, "Connection"] = queue.pop(0)
+
if isinstance(element, connection_type):
queue.extend(element._get_elements_recursive())
continue
- assert isinstance(element, Element), element
+
+ if not isinstance(element, Element):
+ raise TypeError(f"Expected an Element instead of {element=}")
+
if element not in elements:
elements.append(element)
+
if isinstance(element, Container):
queue.extend(
filter(
@@ -1093,7 +1280,10 @@ def _get_elements_recursive(self) -> List[Element]:
element.get_subcircuits().values(),
)
)
- assert len(elements) == len(set(elements)), "Detected duplicates!"
+
+ if len(elements) != len(set(elements)):
+ raise ValueError("Detected duplicate elements")
+
return elements
def generate_element_identifiers(self, running: bool) -> Dict[Element, int]:
@@ -1110,19 +1300,23 @@ def generate_element_identifiers(self, running: bool) -> Dict[Element, int]:
-------
Dict[Element, int]
"""
- if running is True:
+ if running:
return {
element: i for i, element in enumerate(self._get_elements_recursive())
}
+
elements: List[Element] = self._get_elements_recursive()
identifiers: Dict[Element, int] = {}
+
element: Element
counts: Dict[str, int] = {element.get_symbol(): 0 for element in elements}
+
for element in elements:
symbol: str = element.get_symbol()
i: int = counts[symbol] + 1
counts[symbol] = i
identifiers[element] = i
+
return identifiers
def contains(
@@ -1146,7 +1340,8 @@ def contains(
bool
"""
if top_level:
- return element_or_connection in self._elements
+ return any((item is element_or_connection for item in self._elements))
+
return element_or_connection in self
def append(self, element_or_connection: Union[Element, "Connection"]):
@@ -1234,6 +1429,7 @@ def index(
"""
if end < 0:
end = len(self._elements)
+
return self._elements.index(element_or_connection, start, end)
def count(self) -> int:
@@ -1265,55 +1461,56 @@ def to_string(self, decimals: int = -1) -> str:
"""
pass
- def get_connections(self, flattened: bool = True) -> List["Connection"]:
+ def get_connections(self, recursive: bool = True) -> List["Connection"]:
"""
Get the connections in this connection.
Parameters
----------
- flattened: bool, optional
- Whether or not the connections should be returned as a list of all connections or as a list connections that may also contain more connections.
+ recursive: bool, optional
+ If True and this Connection contains other Connection instances, then all nested Connection instances are returned.
+ If False, then only the Connection instances within the top level of this Connection are returned.
Returns
-------
List[Connection]
"""
- if flattened:
+ if recursive:
connections: List["Connection"] = []
- for element in self._elements:
- if isinstance(element, Connection):
- connections.append(element)
- connections.extend(element.get_connections(flattened=flattened))
- return list(connections)
- return list(
- filter(lambda _: isinstance(_, Connection), self._elements) # type: ignore
- )
+ for item in self._elements:
+ if isinstance(item, Connection):
+ connections.append(item)
+ connections.extend(item.get_connections(recursive=recursive))
- def get_elements(
- self,
- flattened: bool = True,
- ) -> List[Union[Element, "Connection"]]:
+ return connections
+
+ return [item for item in self._elements if isinstance(item, Connection)]
+
+ def get_elements(self, recursive: bool = True) -> List[Element]:
"""
- Get the elements in this circuit.
+ Get the elements in this connection.
Parameters
----------
- flattened: bool, optional
- Whether or not the elements should be returned as a list of only elements or as a list of connections containing elements.
+ recursive: bool, optional
+ If True and this Connection contains other Connection instances, then all nested elements are returned.
+ If False, then only the Element instances within the top level of this Connection are returned.
Returns
-------
- List[Union[Element, Connection]]
+ List[Element]
"""
- if flattened:
- elements: List[Union[Element, "Connection"]] = []
- for element in self._elements:
- if isinstance(element, Connection):
- elements.extend(element.get_elements(flattened=flattened))
+ if recursive:
+ elements: List[Element] = []
+ for item in self._elements:
+ if isinstance(item, Connection):
+ elements.extend(item.get_elements(recursive=recursive))
else:
- elements.append(element)
+ elements.append(item)
+
return elements
- return self._elements[:]
+
+ return [item for item in self._elements if isinstance(item, Element)]
@abstractmethod
def _impedance(self, f: Frequencies) -> ComplexImpedances:
@@ -1345,7 +1542,6 @@ def get_impedances(self, frequencies: Frequencies) -> ComplexImpedances:
-------
|ComplexImpedances|
"""
- assert isinstance(frequencies, ndarray), frequencies
return _calculate_impedances(self, frequencies)
@abstractmethod
@@ -1413,14 +1609,21 @@ def get_element_name(
-------
str
"""
- assert element in self, "This connection does not contain the provided element!"
+ if element not in self:
+ raise ValueError(f"This connection does not contain {element=}")
+
name: str = element.get_name()
symbol: str = element.get_symbol()
+
if name != symbol:
return name
+
if identifiers is None:
identifiers = self.generate_element_identifiers(running=False)
- assert element in identifiers
+
+ if element not in identifiers:
+ raise ValueError(f"{element=} does not exist in {identifiers=}")
+
return f"{symbol}_{identifiers[element]}"
@@ -1436,6 +1639,7 @@ class Container(Element):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._subcircuit_value: Dict[str, Optional[Connection]] = {}
+
key: str
value: Optional[Connection]
for key, value in self._subcircuit_default_value.items():
@@ -1445,16 +1649,21 @@ def __init__(self, **kwargs):
)
else:
value = kwargs[key]
- assert value is None or isinstance(value, Connection), value
- self._subcircuit_value[key] = value
+ if isinstance(value, Connection) or value is None:
+ self._subcircuit_value[key] = value
+ else:
+ raise TypeError(
+ f"Expected the {key=} subcircuit to be a Connection or None instead of {value=}"
+ )
- def __contains__(self, element_or_connection: Union[Element, Connection]) -> bool:
+ def __contains__(self, item: Union[Element, Connection]) -> bool:
con: Optional[Connection]
for con in self._subcircuit_value.values():
if con is None:
continue
- if element_or_connection in con:
+ if item in con:
return True
+
return False
def __copy__(self) -> "Container":
@@ -1475,6 +1684,7 @@ def __copy__(self) -> "Container":
def __deepcopy__(self, memo: dict) -> "Container":
ident: int = id(self)
copy: Optional["Container"] = memo.get(ident)
+
if copy is None:
copy = (
type(self)(
@@ -1490,7 +1700,10 @@ def __deepcopy__(self, memo: dict) -> "Container":
.set_label(self._label)
)
memo[ident] = copy
- assert copy is not None
+
+ if copy is None:
+ raise ValueError(f"Expected {copy=} to not be None")
+
return copy
def _sympy(
@@ -1522,14 +1735,22 @@ def to_sympy(
-------
`sympy.Expr`_
"""
- assert isinstance(substitute, bool), substitute
+ if not _is_boolean(substitute):
+ raise TypeError(f"Expected a boolean instead of {substitute=}")
+
if identifiers is None:
identifiers = self.generate_element_identifiers(running=False)
- assert isinstance(identifiers, dict), identifiers
+
+ if not isinstance(identifiers, dict):
+ raise TypeError(
+ f"Expected identifiers to be a dictionary instead of {identifiers=}"
+ )
+
identifier: int = identifiers[self]
substitutions: Dict[str, Union[str, float, Expr]] = {}
values: Dict[str, float] = self.get_values()
subcircuits: Dict[str, Optional[Connection]] = self.get_subcircuits()
+
key: str
value: float
for key, value in values.items():
@@ -1547,14 +1768,18 @@ def to_sympy(
repl = "-oo"
else:
repl = value
+
substitutions[key] = repl
+
con: Optional[Connection]
for key, con in subcircuits.items():
if con is None:
repl = "oo"
else:
repl = con.to_sympy(substitute=substitute, identifiers=identifiers)
+
substitutions[key] = repl
+
return self._sympy(
substitute=substitute,
identifiers=identifiers,
@@ -1581,7 +1806,7 @@ def generate_element_identifiers(self, running: bool) -> Dict[Element, int]:
counts: Dict[str, int] = {}
def process_element(element: Element):
- if running is True:
+ if running:
identifiers[element] = len(identifiers)
else:
symbol: str = element.get_symbol()
@@ -1590,6 +1815,7 @@ def process_element(element: Element):
i: int = counts[symbol] + 1
counts[symbol] = i
identifiers[element] = i
+
if isinstance(element, Container):
subcircuits.extend(
filter(
@@ -1601,21 +1827,28 @@ def process_element(element: Element):
process_element(self)
counts[self.get_symbol()] = 0
identifiers[self] = -1
+
connection: Optional[Connection]
for connection in self.get_subcircuits().values():
if connection is None:
continue
- [process_element(_) for _ in connection.get_elements(flattened=True)]
+ [process_element(_) for _ in connection.get_elements(recursive=True)]
+
return identifiers
def to_string(self, decimals: int = -1) -> str:
cdc: str = super().to_string(decimals=decimals)
+
if decimals < 0 or not cdc.endswith("}"):
return cdc
+
index: int = cdc.find("{") + 1
- assert index > 0
+ if index < 1:
+ raise ValueError(f"Expected the CDC to begin with '{{' instead of {cdc=}")
+
ending: str
cdc, ending = cdc[:index], cdc[index:]
+
key: str
for key in sorted(self._subcircuit_value.keys()):
con: Optional[Connection] = self._subcircuit_value[key]
@@ -1625,12 +1858,14 @@ def to_string(self, decimals: int = -1) -> str:
cdc += f"{key}=short, "
else:
cdc += f"{key}={con.to_string(decimals=decimals)}, "
+
if ending[0] == ":" or ending[0] == "}":
cdc = cdc[:-2]
+
return cdc + ending
@classmethod
- def get_units(Class, *args, **kwargs) -> Dict[str, str]:
+ def get_units(cls, *args, **kwargs) -> Dict[str, str]:
"""
Get a dictionary that maps parameter and/or subcircuit keys to their corresponding units.
@@ -1648,23 +1883,27 @@ def get_units(Class, *args, **kwargs) -> Dict[str, str]:
Dict[str, str]
"""
results: Dict[str, str] = {}
+
if not (args or kwargs):
- results.update(Class._parameter_unit)
- results.update(Class._subcircuit_unit)
+ results.update(cls._parameter_unit)
+ results.update(cls._subcircuit_unit)
+
else:
key: Any
for key in set(list(args) + list(kwargs.keys())):
- assert isinstance(key, str), key
- if key in Class._parameter_unit:
- results[key] = Class._parameter_unit[key]
- elif key in Class._subcircuit_unit:
- results[key] = Class._subcircuit_unit[key]
+ if key in cls._parameter_unit:
+ results[key] = cls._parameter_unit[key]
+ elif key in cls._subcircuit_unit:
+ results[key] = cls._subcircuit_unit[key]
else:
- raise Exception(f"Invalid parameter/subcircuit key: '{key}'!")
+ raise KeyError(
+ f"Expected a key that exists in either {cls._parameter_unit.keys()} or {cls._subcircuit_unit.keys()} instead of {key=}"
+ )
+
return results
@classmethod
- def get_unit(Class, key: str) -> str:
+ def get_unit(cls, key: str) -> str:
"""
Get the unit for a specific parameter or subcircuit.
@@ -1677,10 +1916,10 @@ def get_unit(Class, key: str) -> str:
-------
str
"""
- return Class.get_units(key)[key]
+ return cls.get_units(key)[key]
@classmethod
- def get_subcircuit_descriptions(Class, *args, **kwargs) -> Dict[str, str]:
+ def get_subcircuit_descriptions(cls, *args, **kwargs) -> Dict[str, str]:
"""
Get a dictionary that maps subcircuit keys to their corresponding descriptions.
@@ -1698,16 +1937,18 @@ def get_subcircuit_descriptions(Class, *args, **kwargs) -> Dict[str, str]:
Dict[str, str]
"""
if not (args or kwargs):
- return Class._subcircuit_description.copy()
+ return cls._subcircuit_description.copy()
+
results: Dict[str, str] = {}
+
key: Any
for key in set(list(args) + list(kwargs.keys())):
- assert isinstance(key, str), key
- results[key] = Class._subcircuit_description[key]
+ results[key] = cls._subcircuit_description[key]
+
return results
@classmethod
- def get_subcircuit_description(Class, key: str) -> str:
+ def get_subcircuit_description(cls, key: str) -> str:
"""
Get the description for a specific subcircuit.
@@ -1720,11 +1961,11 @@ def get_subcircuit_description(Class, key: str) -> str:
-------
str
"""
- return Class.get_subcircuit_descriptions(key)[key]
+ return cls.get_subcircuit_descriptions(key)[key]
@classmethod
def get_default_subcircuits(
- Class, *args, **kwargs
+ cls, *args, **kwargs
) -> Dict[str, Optional[Connection]]:
"""
Get the default values for this element's parameters as a dictionary.
@@ -1734,16 +1975,18 @@ def get_default_subcircuits(
Dict[str, Optional[Connection]]
"""
if not (args or kwargs):
- return Class._subcircuit_default_value.copy()
+ return cls._subcircuit_default_value.copy()
+
results: Dict[str, Optional[Connection]] = {}
+
key: Any
for key in set(list(args) + list(kwargs.keys())):
- assert isinstance(key, str), key
- results[key] = Class._subcircuit_default_value[key]
+ results[key] = cls._subcircuit_default_value[key]
+
return results
@classmethod
- def get_default_subcircuit(Class, key: str) -> Optional[Connection]:
+ def get_default_subcircuit(cls, key: str) -> Optional[Connection]:
"""
Get the default value for a specific subcircuit.
@@ -1756,7 +1999,7 @@ def get_default_subcircuit(Class, key: str) -> Optional[Connection]:
-------
float
"""
- return Class.get_default_subcircuits(key)[key]
+ return cls.get_default_subcircuits(key)[key]
def get_subcircuits(self, *args, **kwargs) -> Dict[str, Optional[Connection]]:
"""
@@ -1777,11 +2020,13 @@ def get_subcircuits(self, *args, **kwargs) -> Dict[str, Optional[Connection]]:
"""
if not (args or kwargs):
return self._subcircuit_value.copy()
+
results: Dict[str, Optional[Connection]] = {}
+
key: Any
for key in set(list(args) + list(kwargs.keys())):
- assert isinstance(key, str), key
results[key] = self._subcircuit_value[key]
+
return results
def get_subcircuit(self, key: str) -> Optional[Connection]:
@@ -1815,21 +2060,36 @@ def set_subcircuits(self, *args, **kwargs) -> "Element":
-------
Element
"""
+ pairs: dict = kwargs.copy()
+
key: Any
value: Any
if args:
- assert len(args) % 2 == 0
+ if len(args) % 2 != 0:
+ raise ValueError(f"Expected pairs of arguments instead of {args=}")
+
args_list: List[Any] = list(args)
while args_list:
key = args_list.pop(0)
value = args_list.pop(0)
- assert isinstance(key, str), key
- assert value is None or isinstance(value, Connection), value
- assert key in self._subcircuit_value, key
+ if key in pairs:
+ raise KeyError(
+ f"The key-value pair {key=} was already defined as a keyword argument"
+ )
+ else:
+ pairs[key] = value
+
+ for key, value in pairs.items():
+ if key not in self._subcircuit_value:
+ raise KeyError(
+ f"Expected a key that exists in {self._subcircuit_value.keys()} instead of {key=}"
+ )
+
+ if isinstance(value, Connection) or value is None:
self._subcircuit_value[key] = value
- for key, value in kwargs.items():
- assert isinstance(key, str), key
- assert value is None or isinstance(value, Connection), value
- assert key in self._subcircuit_value, key
- self._subcircuit_value[key] = value
+ else:
+ raise TypeError(
+ f"Expected the {key=} subcircuit to be a Connection or None instead of {value=}"
+ )
+
return self
diff --git a/src/pyimpspec/circuit/capacitor.py b/src/pyimpspec/circuit/capacitor.py
index 385d18b..80cef3f 100644
--- a/src/pyimpspec/circuit/capacitor.py
+++ b/src/pyimpspec/circuit/capacitor.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
diff --git a/src/pyimpspec/circuit/circuit.py b/src/pyimpspec/circuit/circuit.py
index 21b06b8..5b1b357 100644
--- a/src/pyimpspec/circuit/circuit.py
+++ b/src/pyimpspec/circuit/circuit.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -30,7 +30,7 @@
_calculate_impedances,
)
from .series import Series
-from numpy import ndarray
+from .parallel import Parallel
from sympy import (
Expr,
latex,
@@ -54,13 +54,27 @@ class Circuit:
Parameters
----------
- elements: Series
- The elements of the circuit wrapped in a Series connection.
+ elements: Union[List[Element], Element, Series, Parallel]
+ The elements of the circuit.
"""
def __init__(self, elements: Connection):
- if not isinstance(elements, Series):
+ if isinstance(elements, Series):
+ pass
+ elif isinstance(elements, Parallel):
elements = Series([elements])
+ elif isinstance(elements, Element):
+ elements = Series([elements])
+ elif isinstance(elements, list):
+ if not all(map(lambda e: isinstance(e, Element), elements)):
+ raise TypeError(f"Expected a List[Element] instead of {elements=}")
+ else:
+ elements = Series([elements])
+ else:
+ raise TypeError(
+ f"Expected a List[Element], Element, Series, or Parallel instead of {elements=}"
+ )
+
self._elements: Series = elements
def __copy__(self) -> "Circuit":
@@ -71,13 +85,18 @@ def __copy__(self) -> "Circuit":
def __deepcopy__(self, memo: dict) -> "Circuit":
ident: int = id(self)
copy: Optional["Circuit"] = memo.get(ident)
+
if copy is None:
copy = type(self)(
self._elements.__deepcopy__(memo), # type: ignore
)
memo[ident] = copy
+
return copy
+ def __iter__(self) -> List[Union[Element, Connection]]:
+ return self._elements.__iter__()
+
def __repr__(self) -> str:
return f"Circuit ('{self.to_string()}', {hex(id(self))})"
@@ -90,6 +109,7 @@ def __contains__(self, element_or_connection: Union[Element, Connection]) -> boo
def to_stack(self) -> List[Tuple[str, Union[Element, Connection]]]:
stack: List[Tuple[str, Union[Element, Connection]]] = []
self._elements.to_stack(stack)
+
return stack
def serialize(self, decimals: int = 12) -> str:
@@ -133,46 +153,50 @@ def get_impedances(self, frequencies: Frequencies) -> ComplexImpedances:
-------
|ComplexImpedances|
"""
- assert isinstance(frequencies, ndarray), frequencies
return _calculate_impedances(self._elements, frequencies)
- def get_connections(self, flattened: bool = True) -> List[Connection]:
+ def get_connections(self, recursive: bool = True) -> List[Connection]:
"""
Get the connections in this circuit.
Parameters
----------
- flattened: bool, optional
- Whether or not the connections should be returned as a list of all connections or as a list connections that may also contain more connections.
+ recursive: bool, optional
+ If True and this Circuit contains nested Connection instances, then all of them are returned.
+ If False, then only the top-level Connection is returned.
Returns
-------
List[Connection]
"""
- if flattened is True:
+ if recursive:
connections: List[Connection] = self._elements.get_connections(
- flattened=flattened
+ recursive=recursive
)
connections.insert(0, self._elements)
+
return connections
+
return [self._elements]
- def get_elements(self, flattened: bool = True) -> List[Union[Element, Connection]]:
+ def get_elements(self, recursive: bool = True) -> List[Element]:
"""
Get the elements in this circuit.
Parameters
----------
- flattened: bool, optional
- Whether or not the elements should be returned as a list of only elements or as a list of connections containing elements.
+ recursive: bool, optional
+ If True and there are Element instances nested within Connection instances, then all Element instances are returned.
+ If False, then only the Element instances within the top-level Connection are returned.
Returns
-------
- List[Union[Element, Connection]]
+ List[Element]
"""
- if flattened is True:
- return self._elements.get_elements(flattened=flattened)
- return [self._elements]
+ if recursive:
+ return self._elements.get_elements(recursive=recursive)
+
+ return [item for item in self._elements if isinstance(item, Element)]
def generate_element_identifiers(self, running: bool) -> Dict[Element, int]:
"""
@@ -229,7 +253,9 @@ def to_sympy(self, substitute: bool = False) -> Expr:
substitute=substitute,
identifiers=self.generate_element_identifiers(running=True),
)
- assert isinstance(expr, Expr)
+ if not isinstance(expr, Expr):
+ raise TypeError(f"Expected an Expr instead of {expr=}")
+
return expr
def to_latex(self) -> str:
diff --git a/src/pyimpspec/circuit/circuit_builder.py b/src/pyimpspec/circuit/circuit_builder.py
index d3d2a57..d210997 100644
--- a/src/pyimpspec/circuit/circuit_builder.py
+++ b/src/pyimpspec/circuit/circuit_builder.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -17,7 +17,10 @@
# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
# the LICENSES folder.
-from typing import (List, Union,)
+from typing import (
+ List,
+ Union,
+)
from pyimpspec.circuit.base import Element
from pyimpspec.circuit.circuit import Circuit
from pyimpspec.circuit.parser import Parser
@@ -42,13 +45,11 @@ def __enter__(self) -> "CircuitBuilder":
def __exit__(self, *args, **kwargs):
if self._is_parallel:
- assert (
- len(self._elements) >= 2
- ), "Parallel connections must contain at least two items (elements and/or other connections)."
+ if len(self._elements) < 2:
+ raise ValueError("Parallel connections must contain at least two items (elements and/or other connections)")
else:
- assert (
- len(self._elements) >= 1
- ), "Series connections must contain at least one item (an element or another connection)."
+ if not (len(self._elements) >= 1):
+ raise ValueError("Series connections must contain at least one item (an element or another connection)")
def __str__(self) -> str:
return self.to_string()
@@ -63,6 +64,7 @@ def series(self) -> "CircuitBuilder":
"""
series: "CircuitBuilder" = CircuitBuilder(parallel=False)
self._elements.append(series)
+
return series
def parallel(self) -> "CircuitBuilder":
@@ -75,10 +77,12 @@ def parallel(self) -> "CircuitBuilder":
"""
parallel: "CircuitBuilder" = CircuitBuilder(parallel=True)
self._elements.append(parallel)
+
return parallel
def __iadd__(self, element: Element) -> "CircuitBuilder":
self.add(element)
+
return self
def add(self, element: Element):
@@ -90,18 +94,23 @@ def add(self, element: Element):
element: Element
The element to add to the current series or parallel connection.
"""
- assert isinstance(element, Element), element
+ if not isinstance(element, Element):
+ raise TypeError(f"Expected an Element instead of {element=}")
+
self._elements.append(element)
def _to_string(self, decimals: int = 12) -> str:
cdc: str = "(" if self._is_parallel else "["
element: Union["CircuitBuilder", Element]
+
for element in self._elements:
if isinstance(element, Element):
cdc += element.to_string(decimals=decimals)
else:
cdc += element._to_string(decimals=decimals)
+
cdc += ")" if self._is_parallel else "]"
+
return cdc
def to_string(self, decimals: int = -1) -> str:
diff --git a/src/pyimpspec/circuit/connections.py b/src/pyimpspec/circuit/connections.py
index 214bb10..dfeaf2e 100644
--- a/src/pyimpspec/circuit/connections.py
+++ b/src/pyimpspec/circuit/connections.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
diff --git a/src/pyimpspec/circuit/constant_phase_element.py b/src/pyimpspec/circuit/constant_phase_element.py
index 4301d6a..6c0d968 100644
--- a/src/pyimpspec/circuit/constant_phase_element.py
+++ b/src/pyimpspec/circuit/constant_phase_element.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
diff --git a/src/pyimpspec/circuit/de_levie.py b/src/pyimpspec/circuit/de_levie.py
index fa2f24a..296cf21 100644
--- a/src/pyimpspec/circuit/de_levie.py
+++ b/src/pyimpspec/circuit/de_levie.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
diff --git a/src/pyimpspec/circuit/diagrams/__init__.py b/src/pyimpspec/circuit/diagrams/__init__.py
index 2b4f0fe..c01b73d 100644
--- a/src/pyimpspec/circuit/diagrams/__init__.py
+++ b/src/pyimpspec/circuit/diagrams/__init__.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
diff --git a/src/pyimpspec/circuit/diagrams/circuitikz.py b/src/pyimpspec/circuit/diagrams/circuitikz.py
index 61ff912..1d2c970 100644
--- a/src/pyimpspec/circuit/diagrams/circuitikz.py
+++ b/src/pyimpspec/circuit/diagrams/circuitikz.py
@@ -1,5 +1,5 @@
# pyimpspec is licensed under the GPLv3 or later (https://www.gnu.org/licenses/gpl-3.0.html).
-# Copyright 2023 pyimpspec developers
+# Copyright 2024 pyimpspec developers
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -17,14 +17,6 @@
# The licenses of pyimpspec's dependencies and/or sources of portions of code are included in
# the LICENSES folder.
-from typing import (
- Dict,
- List,
- Optional,
- Tuple,
- Type,
- Union,
-)
from pyimpspec.circuit.base import (
Connection,
Element,
@@ -38,6 +30,15 @@
ModifiedInductor,
)
from pyimpspec.circuit.constant_phase_element import ConstantPhaseElement
+from pyimpspec.typing.helpers import (
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ Type,
+ Union,
+ _is_floating,
+)
def to_circuitikz(
@@ -81,17 +82,45 @@ def to_circuitikz(
-------
str
"""
- assert node_width > 0
- assert node_height > 0
- assert isinstance(left_terminal_label, str), left_terminal_label
- assert isinstance(right_terminal_label, str), right_terminal_label
- assert isinstance(hide_labels, bool), hide_labels
+ if not _is_floating(node_width):
+ raise TypeError(f"Expected a float instead of {node_width=}")
+ elif node_width <= 0.0:
+ raise ValueError(f"Expected a value greater than 0.0 instead of {node_width=}")
+
+ if not _is_floating(node_height):
+ raise TypeError(f"Expected a float instead of {node_height=}")
+ elif node_height <= 0.0:
+ raise ValueError(f"Expected a value greater than 0.0 instead of {node_height=}")
+
+ if not isinstance(left_terminal_label, str):
+ raise TypeError(f"Expected a string instead of {left_terminal_label=}")
+
+ if not isinstance(right_terminal_label, str):
+ raise TypeError(f"Expected a string instead of {right_terminal_label=}")
+
+ if not isinstance(hide_labels, bool):
+ raise TypeError(f"Expected a boolean instead of {hide_labels=}")
+
if hide_labels:
left_terminal_label = ""
right_terminal_label = ""
- assert isinstance(running, bool), running
- assert isinstance(custom_labels, dict) or custom_labels is None, custom_labels
+
+ if not isinstance(running, bool):
+ raise TypeError(f"Expected a boolean instead of {running=}")
+
+ if custom_labels is None:
+ pass
+ elif not isinstance(custom_labels, dict):
+ raise TypeError(f"Expected a dictionary or None instead of {custom_labels=}")
+ elif not all(map(lambda key: isinstance(key, Element), custom_labels.keys())):
+ raise TypeError(
+ f"Expected all keys in {custom_labels=} to be Element instances"
+ )
+ elif not all(map(lambda value: isinstance(value, str), custom_labels.values())):
+ raise TypeError(f"Expected all values in {custom_labels=} to be strings")
+
identifiers: Dict[Element, int] = self.generate_element_identifiers(running=running)
+
# Phase 1 - figure out the dimensions of the connections and the positions of elements.
short_counter: int = 0
dimensions: Dict[Union[Series, Parallel, Element, int], Tuple[float, float]] = {}
@@ -101,6 +130,7 @@ def to_circuitikz(
def short_wire(x: float, y: float) -> Tuple[float, float]:
nonlocal short_counter
short_counter += 1
+
dimensions[short_counter] = (
0.25,
1.0,
@@ -109,6 +139,7 @@ def short_wire(x: float, y: float) -> Tuple[float, float]:
x,
-y,
)
+
return dimensions[short_counter]
def phase_1_element(element: Element, x: float, y: float) -> Tuple[float, float]:
@@ -120,16 +151,17 @@ def phase_1_element(element: Element, x: float, y: float) -> Tuple[float, float]
x,
-y,
)
+
return dimensions[element]
def phase_1_series(series: Series, x: float, y: float) -> Tuple[float, float]:
nonlocal num_nested_parallels
width: float = 0.0
height: float = 0.0
- elements: List[Union[Element, Connection]] = series.get_elements(
- flattened=False
- )
+
+ elements: List[Union[Element, Connection]] = list(iter(series))
num_elements: int = len(elements)
+
i: int
element_connection: Union[Element, Connection]
for i, element_connection in enumerate(elements):
@@ -138,16 +170,19 @@ def phase_1_series(series: Series, x: float, y: float) -> Tuple[float, float]:
width += w
if h > height:
height = h
+
elif type(element_connection) is Parallel:
if num_nested_parallels > 0 and i == 0:
w, h = short_wire(x + width, y)
width += w
if h > height:
height = h
+
w, h = phase_1_parallel(element_connection, x + width, y)
width += w
if h > height:
height = h
+
if num_nested_parallels > 0 and (
i == num_elements - 1
or (i < num_elements - 1 and type(elements[i + 1]) is Parallel)
@@ -156,10 +191,16 @@ def phase_1_series(series: Series, x: float, y: float) -> Tuple[float, float]:
width += w
if h > height:
height = h
+
else:
- assert isinstance(element_connection, Element)
+ if not isinstance(element_connection, Element):
+ raise TypeError(
+ f"Expected an Element instead of {element_connection=}"
+ )
+
w, h = phase_1_element(element_connection, x + width, y)
width += w
+
dimensions[series] = (
max(1, width),
max(1, height),
@@ -168,30 +209,40 @@ def phase_1_series(series: Series, x: float, y: float) -> Tuple[float, float]:
x,
-y,
)
+
return dimensions[series]
def phase_1_parallel(parallel: Parallel, x: float, y: float) -> Tuple[float, float]:
nonlocal num_nested_parallels
num_nested_parallels += 1
+
width: float = 0.0
height: float = 0.0
- for element_connection in parallel.get_elements(flattened=False):
+
+ for element_connection in parallel:
if type(element_connection) is Series:
w, h = phase_1_series(element_connection, x, y + height)
if w > width:
width = w
height += h
+
elif type(element_connection) is Parallel:
w, h = phase_1_parallel(element_connection, x, y + height)
if w > width:
width = w
height += h
+
else:
- assert isinstance(element_connection, Element)
+ if not isinstance(element_connection, Element):
+ raise TypeError(
+ f"Expected an Element instead of {element_connection=}"
+ )
+
w, h = phase_1_element(element_connection, x, y + height)
if w > width:
width = w
height += h
+
dimensions[parallel] = (
max(1, width),
max(1, height),
@@ -200,7 +251,9 @@ def phase_1_parallel(parallel: Parallel, x: float, y: float) -> Tuple[float, flo
x,
-y,
)
+
num_nested_parallels -= 1
+
return dimensions[parallel]
main_connection: Series
@@ -210,9 +263,16 @@ def phase_1_parallel(parallel: Parallel, x: float, y: float) -> Tuple[float, flo
main_connection = Series([self])
else:
main_connection = self
- assert isinstance(main_connection, Series), main_connection
+
+ if not isinstance(main_connection, Series):
+ raise TypeError(f"Expected a Series instead of {main_connection=}")
+
phase_1_series(main_connection, 0, 0)
- assert set(dimensions.keys()) == set(positions.keys())
+
+ if set(dimensions.keys()) != set(positions.keys()):
+ raise ValueError(
+ f"Expected matching sets of keys for dimensions and positions instead of {set(dimensions.keys())=} and {set(positions.keys())=}"
+ )
# Phase 2 - generate the LaTeX source for drawing the circuit diagram.
lines: List[str] = [
@@ -224,6 +284,7 @@ def phase_1_parallel(parallel: Parallel, x: float, y: float) -> Tuple[float, flo
else "",
),
]
+
line: str
symbols: Dict[Type[Element], str] = {
Resistor: "R",
@@ -246,49 +307,66 @@ def replace_variables(
line = line.replace("", str(end_x))
line = line.replace("", str(end_y))
line = line.replace("", element)
+
return line
def phase_2():
for element_connection in positions:
x, y = positions[element_connection]
w, h = dimensions[element_connection]
+
if type(element_connection) is Series:
continue
+
elif type(element_connection) is Parallel:
start_x = x * (node_width - 1.0) + 1.0
start_y = 1.0
end_x = (x + w) * (node_width - 1.0) + 1.0
end_y = 1.0
+
for element in dimensions:
if not element_connection.contains(element, top_level=True):
continue
+
ey = positions[element][1]
+
if start_y > 0.0 or ey > start_y:
start_y = ey
+
if end_y > 0.0 or ey < end_y:
end_y = ey
- assert start_y != end_y
+
+ if start_y == end_y:
+ raise ValueError(f"Expected {start_y=} != {end_y=}")
+
start_y *= node_height
end_y *= node_height
+
line = r"\draw (,) to[] (,);"
lines.append(replace_variables(line, start_x, start_y, end_x, end_y))
line = r"\draw (,) to[] (,);"
lines.append(replace_variables(line, start_x, start_y, end_x, end_y))
+
if w == 1.0:
continue
+
for elem_con in filter(lambda _: type(_) is not Parallel, dimensions):
if not element_connection.contains(elem_con, top_level=True):
continue
+
if w == dimensions[elem_con][0]:
continue
+
if type(elem_con) is Series:
ex, ey = positions[elem_con]
ew, eh = dimensions[elem_con]
else:
ex, ey = positions[elem_con]
ew, eh = dimensions[elem_con]
+
start_x = (ex + ew) * (node_width - 1.0) + 1.0
start_y = ey * node_height
+
# Use the same end_x as the RPar line
end_y = start_y
line = (
@@ -297,12 +375,14 @@ def phase_2():
lines.append(
replace_variables(line, start_x, start_y, end_x, end_y)
)
+
elif isinstance(element_connection, Element):
start_x = x * (node_width - 1.0) + 1.0
start_y = y * node_height
end_x = (x + w) * (node_width - 1.0) + 1.0
end_y = start_y
line = r"\draw (,) to[] (,);"
+
symbol: str
label: str = ""
if not hide_labels:
@@ -317,6 +397,7 @@ def phase_2():
identifiers[element_connection]
)
label = f"{symbol}_{{\\rm {label}}}"
+
symbol = symbols.get(type(element_connection), "generic")
lines.append(
replace_variables(
@@ -328,6 +409,7 @@ def phase_2():
f"{symbol}=${label}$",
)
)
+
elif type(element_connection) is int:
start_x = x * (node_width - 1.0) + 1.0
start_y = y * node_height
@@ -346,13 +428,17 @@ def phase_2():
phase_2()
x, y = positions[main_connection]
w, h = dimensions[main_connection]
+
start_x = (x + w) * (node_width - 1) + 1
end_x = start_x + 1
+
line = r"\draw (,) to[, -o] (,)