Commit

Add release_notes.rst
breimanntools committed Apr 18, 2024
1 parent da400fc commit b6e46fa
Showing 4 changed files with 97 additions and 15 deletions.
1 change: 1 addition & 0 deletions aaanalysis/config.py
@@ -166,5 +166,6 @@ def __str__(self) -> str:
"""Return a string representation of the settings dictionary."""
return str(self._settings)


# Global settings instance
options = Settings()
6 changes: 1 addition & 5 deletions aaanalysis/utils.py
@@ -298,7 +298,6 @@ def _folder_path(super_folder, folder_name):
STR_DICT_CAT = "DICT_CAT"



# I Helper functions
def _retrieve_string_starting_at_end(seq, start=None, end=None):
"""Reverse_string_start_end"""
@@ -330,9 +329,6 @@ def get_dict_part_seq(tmd=None, jmd_n=None, jmd_c=None):
return part_seq_dict





# II Main functions
# Caching for data loading for better performance (data loaded once)
@lru_cache(maxsize=None)
@@ -443,7 +439,7 @@ def plot_get_cmap_(cmap="CPP", n_colors=101, facecolor_dark=None, only_pos=False
if cmap == STR_CMAP_CPP:
cmap = _get_diverging_cmap("RdBu_r", **args)
elif cmap == STR_CMAP_SHAP:
- cmap = _get_shap_cmap(**args)
+ cmap = _get_shap_cmap(**args)
else:
cmap = _get_diverging_cmap(cmap=cmap, **args)
return cmap
85 changes: 85 additions & 0 deletions docs/release_notes.rst
@@ -0,0 +1,85 @@
# Release notes

## Version 0.1 (beta version)

## v0.1.5 (Released 2024-04-18)

### Added
- **Code of Conduct**: Introduced a Code of Conduct to foster a welcoming and inclusive community environment.
We encourage all contributors to review the Code of Conduct to understand the expectations and
responsibilities when participating in the project.

### Changed
- **License update**: Transitioned the project license from MIT to BSD-3-Clause to better align with our project's
community engagement and protection goals. This change affects how the software can be used and redistributed.

### Fixed
- **Multiprocessing**: Replaced native multiprocessing with the `joblib` module for CPP and internal feature matrix
creation. This change prevents a `RuntimeError` that occurred when the main function was not explicitly used; see the
sketch below.
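
The following is a minimal sketch of the joblib-based pattern, not the actual aaanalysis internals; the function and
variable names are illustrative only. `joblib` serializes the dispatched work itself (loky backend), which is why the
feature-matrix creation no longer fails when the calling script lacks an explicit `if __name__ == "__main__":` guard.

```python
# Hypothetical sketch of parallel feature-matrix creation with joblib;
# names are illustrative and not the aaanalysis implementation.
import numpy as np
from joblib import Parallel, delayed


def _feature_column(sequences, scale):
    # Placeholder for the per-feature computation done during CPP feature creation
    # (here simply: scaled sequence lengths).
    return np.array([len(seq) * scale for seq in sequences], dtype=float)


def build_feature_matrix(sequences, scales, n_jobs=-1):
    # Each feature column is computed by a worker process.
    columns = Parallel(n_jobs=n_jobs)(
        delayed(_feature_column)(sequences, s) for s in scales
    )
    return np.column_stack(columns)


X = build_feature_matrix(["AACDE", "WYKLM", "GGST"], scales=[0.5, 1.0, 2.0])
print(X.shape)  # (3, 3)
```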


## v0.1.4 (Released 2024-04-09)

### Added
- **Installation options**: Introduced separate installation profiles for the core and professional versions.
The core version now has reduced dependencies, enhancing installation robustness. For advanced usage, the
professional version includes necessary packages such as SHAP for the AAanalysis explainable AI module
(`TreeModel` and `ShapExplainer`). This version can be installed using `pip install aaanalysis[pro]`.

### Changed
- **API improvements**: General improvements to the API for consistency and ease of use.

### Fixed
- **General issues**: Fixed various API issues related to check functions.

### Other
- **Python dependency**: Updated the Python version compatibility from <= 3.10 to <= 3.12.


## v0.1.3 (Released 2024-02-09)

### Added
- **TreeModel**: Wrapper class around tree-based models for Monte Carlo estimates of predictions and feature importance; a conceptual sketch follows below.
- **ShapExplainer**: A wrapper for SHAP (SHapley Additive exPlanations) explainers to obtain Monte Carlo estimates
for feature impact.
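
As a conceptual illustration only (this is not the aaanalysis API), a Monte Carlo estimate of feature importance can be
obtained by refitting a tree ensemble with different random seeds and aggregating the importances across rounds:

```python
# Conceptual sketch of Monte Carlo feature-importance estimation;
# not the aaanalysis TreeModel implementation.
import numpy as np
from sklearn.ensemble import RandomForestClassifier


def monte_carlo_importance(X, y, n_rounds=5):
    importances = []
    for seed in range(n_rounds):
        model = RandomForestClassifier(n_estimators=100, random_state=seed)
        model.fit(X, y)
        importances.append(model.feature_importances_)
    # Mean and standard deviation over rounds give a point estimate and its spread.
    return np.mean(importances, axis=0), np.std(importances, axis=0)


rng = np.random.default_rng(0)
X = rng.normal(size=(100, 5))
y = (X[:, 0] + rng.normal(scale=0.5, size=100) > 0).astype(int)
mean_imp, std_imp = monte_carlo_importance(X, y)
print(mean_imp.round(3), std_imp.round(3))
```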

### Changed
- **API improvements**: General improvements to the API for consistency and ease of use.

### Fixed
- **Interface**: Replaced the internal documentation decorator with hard-coded docstrings for better IDE responsiveness.
- **General issues**: Fixed various API issues related to check functions.

### Other


## v0.1.2 (Released 2023-11-06)

### Added
- **CPPPlot**: Plotting class for CPP features.
- **dPULearnPlot**: Plotting class for results of negative identifications by dPULearn.
- **AAclustPlot**: Plotting class for AAclust clustering results.
- **options**: Set system-level settings via a dictionary-like interface (similar to pandas); see the sketch below.
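
For illustration, a minimal sketch of such a dictionary-like settings object is shown below. It mirrors the `Settings`
class and global `options` instance in `aaanalysis/config.py` (see the diff above), but the "verbose" key is only an
assumed example, not a documented option.

```python
# Minimal sketch of a dictionary-like settings object; the "verbose" key is an
# assumed example, and the real options live in aaanalysis/config.py.
class Settings:
    def __init__(self):
        self._settings = {"verbose": True}

    def __getitem__(self, key):
        return self._settings[key]

    def __setitem__(self, key, value):
        self._settings[key] = value

    def __str__(self):
        """Return a string representation of the settings dictionary."""
        return str(self._settings)


# Global settings instance
options = Settings()
options["verbose"] = False
print(options)  # {'verbose': False}
```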

### Changed
- **API improvements**: General improvements to the API (Application Programming Interface).

### Fixed
- **API improvements**: General improvements to the API (Application Programming Interface).

### Other
- **Python dependency**: Requires Python <= 3.10 (>3.8).


## v0.1.1 (Released 2023-09-11)
Test release of first beta version.


## v0.1.0 (Released 2023-09-11)
First release of the beta version, including
`CPP <https://aaanalysis.readthedocs.io/en/latest/generated/aaanalysis.CPP.html>`_,
`dPULearn <https://aaanalysis.readthedocs.io/en/latest/generated/aaanalysis.dPULearn.html>`_,
and `AAclust <https://aaanalysis.readthedocs.io/en/latest/generated/aaanalysis.AAclust.html>`_ algorithms
as well as the
`SequenceFeature <https://aaanalysis.readthedocs.io/en/latest/generated/aaanalysis.SequenceFeature.html>`_
utility class.
20 changes: 10 additions & 10 deletions tests/unit/dpulearn_tests/test_dpulearn_fit.py
@@ -50,7 +50,7 @@ def test_X_parameter(self, X):
if size >= 2:
labels = create_labels(X.shape[0])
valid_labels = sum([x for x in set(labels) if x in [1, 2]]) == 2
- is_invalid = check_invalid_conditions(X=X, labels=labels)
+ is_invalid = check_invalid_conditions(X=X, labels=labels)
if not is_invalid and valid_labels:
df_pu = dpul.fit(X, labels).df_pu_
assert isinstance(df_pu, pd.DataFrame)
@@ -61,7 +61,7 @@ def test_labels_parameter(self, labels):
"""Test the 'labels' parameter with valid inputs."""
X = np.random.rand(100, 5) # Assuming 100 samples, 5 features
dpul = aa.dPULearn()
- is_invalid = check_invalid_conditions(X=X, labels=labels)
+ is_invalid = check_invalid_conditions(X=X, labels=labels)
valid_labels = sum([x for x in set(labels) if x in [1, 2]]) == 2
if len(set(labels)) < 2 or 1 not in labels or 2 not in labels:
valid_labels = False
@@ -72,9 +72,9 @@ def test_labels_parameter(self, labels):
def test_labels(self):
"""Test the 'labels' parameter with valid inputs."""
X = np.random.rand(100, 5) # Assuming 100 samples, 5 features
- labels = np.asarray([1]* 50 + [2]* 50)
+ labels = np.asarray([1] * 50 + [2] * 50)
dpul = aa.dPULearn()
- is_invalid = check_invalid_conditions(X=X, labels=labels)
+ is_invalid = check_invalid_conditions(X=X, labels=labels)
valid_labels = sum([x for x in set(labels) if x in [1, 2]]) == 2
if not is_invalid and valid_labels:
df_pu = dpul.fit(X, labels).df_pu_
@@ -88,7 +88,7 @@ def test_n_unl_to_neg_parameter(self, n_unl_to_neg):
labels = create_labels(100)
dpul = aa.dPULearn()
n_unl = sum([x == 2 for x in labels])
- is_invalid = check_invalid_conditions(X=X, labels=labels)
+ is_invalid = check_invalid_conditions(X=X, labels=labels)
valid_labels = sum([x for x in set(labels) if x in [1, 2]]) == 2
if not is_invalid and valid_labels and n_unl > n_unl_to_neg :
df_pu = dpul.fit(X, labels, n_unl_to_neg=n_unl_to_neg).df_pu_
@@ -101,7 +101,7 @@ def test_metric_parameter(self, metric):
X = np.random.rand(100, 5)
labels = create_labels(100)
dpul = aa.dPULearn()
- is_invalid = check_invalid_conditions(X=X, labels=labels)
+ is_invalid = check_invalid_conditions(X=X, labels=labels)
if not is_invalid:
for n in [5, 25, 39]:
df_pu = dpul.fit(X, labels, metric=metric, n_unl_to_neg=n).df_pu_
@@ -115,7 +115,7 @@ def test_n_components_parameter(self, n_components):
labels = create_labels(100)
dpul = aa.dPULearn()
n_samples, n_features = X.shape
- is_invalid = check_invalid_conditions(X=X, labels=labels)
+ is_invalid = check_invalid_conditions(X=X, labels=labels)
for i in [5, 26, 39]:
if not is_invalid:
if n_components < min(n_features, n_samples) and n_components not in [0.0, 1.0]:
@@ -165,7 +165,7 @@ def test_n_unl_to_neg_invalid(self, n_unl_to_neg):
X = np.random.rand(100, 5)
labels = create_labels(100)
dpul = aa.dPULearn()
- is_invalid = check_invalid_conditions(X=X, labels=labels)
+ is_invalid = check_invalid_conditions(X=X, labels=labels)
if is_invalid:
with pytest.raises(ValueError):
dpul.fit(X, labels, n_unl_to_neg=n_unl_to_neg)
@@ -178,7 +178,7 @@ def test_invalid_metric(self, metric):
X = np.random.rand(100, 5)
labels = create_labels(100)
dpul = aa.dPULearn()
- is_invalid = check_invalid_conditions(X=X, labels=labels)
+ is_invalid = check_invalid_conditions(X=X, labels=labels)
if not is_invalid and not metric in valid_metrics:
with pytest.raises(ValueError):
dpul.fit(X, labels, metric=metric)
@@ -191,7 +191,7 @@ def test_n_components_invalid(self, n_components):
X = np.random.rand(100, 5)
labels = create_labels(100)
dpul = aa.dPULearn()
- is_invalid = check_invalid_conditions(X=X, labels=labels)
+ is_invalid = check_invalid_conditions(X=X, labels=labels)
valid_labels = sum([x for x in set(labels) if x in [1, 2]]) == 2
if is_invalid and valid_labels:
with pytest.raises(ValueError):
