diff --git a/asteca/cluster.py b/asteca/cluster.py index 9a44be56..b6576e6b 100644 --- a/asteca/cluster.py +++ b/asteca/cluster.py @@ -1,15 +1,16 @@ import warnings -from dataclasses import dataclass import numpy as np import pandas as pd from .modules import cluster_priv as cp from .modules import nmembers as nm -@dataclass class Cluster: """Define a :py:class:`Cluster` object. + This object contains the basic data required to load a group of observed stars + that could represent a cluster or an entire field. + :param obs_df: `pandas DataFrame `__ with the observed loaded data. :type obs_df: pd.DataFrame @@ -42,7 +43,7 @@ class Cluster: :type plx: str | None :param e_plx: Name of the DataFrame column that contains the parallax uncertainty, defaults to ``None`` - :type plx: str | None + :type e_plx: str | None :param pmra: Name of the DataFrame column that contains the RA proper motion, defaults to ``None`` :type pmra: str | None @@ -59,43 +60,71 @@ class Cluster: :type N_clust_min: int :param N_clust_max: Maximum number of cluster members, defaults to ``5000`` :type N_clust_max: int + + :raises ValueError: If there are missing required attributes to generate the + :py:class:`Cluster ` object """ - obs_df: pd.DataFrame - ra: str | None = None - dec: str | None = None - magnitude: str | None = None - e_mag: str | None = None - color: str | None = None - e_color: str | None = None - color2: str | None = None - e_color2: str | None = None - plx: str | None = None - pmra: str | None = None - pmde: str | None = None - e_plx: str | None = None - e_pmra: str | None = None - e_pmde: str | None = None - N_clust_min: int = 25 - N_clust_max: int = 5000 - - def __post_init__(self): - """The photometry is stored with a '_p' to differentiate from the - self.magnitude, self.color, etc that are defined with the class is called. 
- """ + def __init__( + self, + obs_df: pd.DataFrame, + ra: str | None = None, + dec: str | None = None, + magnitude: str | None = None, + e_mag: str | None = None, + color: str | None = None, + e_color: str | None = None, + color2: str | None = None, + e_color2: str | None = None, + plx: str | None = None, + e_plx: str | None = None, + pmra: str | None = None, + e_pmra: str | None = None, + pmde: str | None = None, + e_pmde: str | None = None, + N_clust_min: int = 25, + N_clust_max: int = 5000, + ) -> None: + self.obs_df = obs_df + self.ra = ra + self.dec = dec + self.magnitude = magnitude + self.e_mag = e_mag + self.color = color + self.e_color = e_color + self.color2 = color2 + self.e_color2 = e_color2 + self.plx = plx + self.pmra = pmra + self.pmde = pmde + self.e_plx = e_plx + self.e_pmra = e_pmra + self.e_pmde = e_pmde + self.N_clust_min = N_clust_min + self.N_clust_max = N_clust_max + self.N_stars = len(self.obs_df) print("\nInstantiating cluster...") print(f"N_stars : {self.N_stars}") print(f"N_clust_min : {self.N_clust_min}") print(f"N_clust_max : {self.N_clust_max}") + dim_count = self._load() + if dim_count > 0: + print("Cluster object generated") + else: + raise ValueError("No column names defined for cluster") + + def _load(self): dim_count = 0 + if self.ra is not None: self.ra_v = self.obs_df[self.ra].values self.dec_v = self.obs_df[self.dec].values print(f"(RA, DEC) : ({self.ra}, {self.dec})") dim_count += 1 + # TODO: change _p to _v if self.magnitude is not None: self.mag_p = self.obs_df[self.magnitude].values if self.e_mag is not None: @@ -148,10 +177,7 @@ def __post_init__(self): print(f"pmDE : {self.pmra} [{self.e_pmde}]") dim_count += 1 - if dim_count > 0: - print("Cluster object generated") - else: - raise ValueError("No column names defined for cluster") + return dim_count def get_center( self, diff --git a/asteca/isochrones.py b/asteca/isochrones.py index 19a3de2f..b1336416 100644 --- a/asteca/isochrones.py +++ b/asteca/isochrones.py @@ -1,10 +1,8 @@ import os import numpy as np -from dataclasses import dataclass from .modules import isochrones_priv -@dataclass class Isochrones: """Define an :py:class:`Isochrones` object. @@ -16,7 +14,7 @@ class Isochrones: `PARSEC `__, `MIST `__, or `BASTI `__. - :type model: str: ``PARSEC, MIST, BASTI`` + :type model: str :param isochs_path: Path to the folder that contains the files for the theoretical isochrones :type isochs_path: str @@ -52,13 +50,6 @@ class Isochrones: :py:meth:`Synthetic.generate() ` method, defaults to ``None`` :type z_to_FeH: float | None - :param column_names: Column names for the initial mass, metallicity, and age for - the photometric system's isochrones files. Example: - ``{"mass_col": "Mini", "met_col": "Zini", "age_col": "logAge"}``. - This dictionary is defined internally in **ASteCA** and should only be given - by the user if the isochrone service changes its format and the `isochrones` - class fails to load the files, defaults to ``None`` - :type column_names: dict, optional :param N_interp: Number of interpolation points used to ensure that all isochrones are the same shape, defaults to ``2500`` :type N_interp: int @@ -67,22 +58,46 @@ class fails to load the files, defaults to ``None`` "`in preparation `__", defaults to ``True`` :type parsec_rm_stage_9: bool + :param column_names: Column names for the initial mass, metallicity, and age for + the photometric system's isochrones files. Example: + ``{"mass_col": "Mini", "met_col": "Zini", "age_col": "logAge"}``. 
+ This dictionary is defined internally in **ASteCA** and should only be given + by the user if the isochrone service changes its format and the `isochrones` + class fails to load the files, defaults to ``None`` + :type column_names: dict | None + + :raises ValueError: If any of the attributes is not recognized as a valid option, + or there are missing required attributes """ - model: str - isochs_path: str - magnitude: str - color: tuple - color2: tuple | None = None - magnitude_effl: float | None = None - color_effl: tuple | None = None - color2_effl: tuple | None = None - z_to_FeH: float | None = None - column_names: dict | None = None - N_interp: int = 2500 - parsec_rm_stage_9: bool = True + def __init__( + self, + model: str, + isochs_path: str, + magnitude: str, + color: tuple, + color2: tuple | None = None, + magnitude_effl: float | None = None, + color_effl: tuple | None = None, + color2_effl: tuple | None = None, + z_to_FeH: float | None = None, + N_interp: int = 2500, + parsec_rm_stage_9: bool = True, + column_names: dict | None = None, + ) -> None: + self.model = model + self.isochs_path = isochs_path + self.magnitude = magnitude + self.color = color + self.color2 = color2 + self.magnitude_effl = magnitude_effl + self.color_effl = color_effl + self.color2_effl = color2_effl + self.z_to_FeH = z_to_FeH + self.column_names = column_names + self.N_interp = N_interp + self.parsec_rm_stage_9 = parsec_rm_stage_9 - def __post_init__(self): # Check that the number of colors match if self.color2 is not None and self.color2_effl is None: raise ValueError( diff --git a/asteca/likelihood.py b/asteca/likelihood.py index cac6d88d..74398287 100644 --- a/asteca/likelihood.py +++ b/asteca/likelihood.py @@ -1,10 +1,8 @@ -from dataclasses import dataclass import numpy as np from .cluster import Cluster from .modules import likelihood_priv as lpriv -@dataclass class Likelihood: """Define a :py:class:`Likelihood` object. @@ -15,24 +13,30 @@ class Likelihood: :param my_cluster: :py:class:`Cluster ` object with the loaded data for the observed cluster - :type my_cluster: :py:class:`Cluster ` + :type my_cluster: Cluster :param lkl_name: Currently only the Poisson likelihood ratio defined in - `Tremmel et al. (2013) `__ + `Tremmel et al. (2013) + `__ is accepted, defaults to ``plr`` :type lkl_name: str :param bin_method: Bin method used to split the color-magnitude diagram into cells - (`Hess diagram `__). If ``manual`` + (`Hess diagram `__); one of: + ``knuth, fixed, bayes_blocks, manual``. 
If ``manual`` is selected, a list containing an array of edge values for the magnitude, followed by one or two arrays (depending on the number of colors defined) for the color(s), also with edge values, defaults to ``knuth`` - :type bin_method: str: ``knuth, fixed, bayes_blocks, manual`` + :type bin_method: str + + :raises ValueError: If any of the attributes is not recognized as a valid option """ - my_cluster: Cluster - lkl_name: str = "plr" - bin_method: str = "knuth" + def __init__( + self, my_cluster: Cluster, lkl_name: str = "plr", bin_method: str = "knuth" + ) -> None: + self.my_cluster = my_cluster + self.lkl_name = lkl_name + self.bin_method = bin_method - def __post_init__(self): likelihoods = ("plr", "bins_distance") if self.lkl_name not in likelihoods: raise ValueError( @@ -54,7 +58,7 @@ def __post_init__(self): np.array([self.my_cluster.mag_p, *self.my_cluster.colors_p]) ) - print("Likelihood object generated") + print("\nLikelihood object generated") def get(self, synth_clust: np.array) -> float: """Evaluate the selected likelihood function. @@ -70,9 +74,9 @@ def get(self, synth_clust: np.array) -> float: """ if self.lkl_name == "plr": return lpriv.tremmel(self, synth_clust) - if self.lkl_name == "visual": - return lpriv.visual(self, synth_clust) - if self.lkl_name == "mean_dist": - return lpriv.mean_dist(self, synth_clust) + # if self.lkl_name == "visual": + # return lpriv.visual(self, synth_clust) + # if self.lkl_name == "mean_dist": + # return lpriv.mean_dist(self, synth_clust) if self.lkl_name == "bins_distance": return lpriv.bins_distance(self, synth_clust) diff --git a/asteca/membership.py b/asteca/membership.py index 4a67c416..ad0c9ad0 100644 --- a/asteca/membership.py +++ b/asteca/membership.py @@ -1,4 +1,3 @@ -from dataclasses import dataclass import numpy as np from .cluster import Cluster from .modules import cluster_priv as cp @@ -6,7 +5,6 @@ from .modules.bayesian_da import bayesian_mp -@dataclass class Membership: """Define a :py:class:`Membership` object. @@ -27,14 +25,17 @@ class Membership: :py:class:`Cluster ` object. Photometry data is not employed. - :param cluster: :py:class:`Cluster ` object with the + :param my_field: :py:class:`Cluster ` object with the loaded data for the observed field - :type cluster: :py:class:`Cluster ` + :type my_field: Cluster + + :raises ValueError: If there are missing required attributes in the + :py:class:`Cluster ` object """ - my_field: Cluster + def __init__(self, my_field: Cluster) -> None: + self.my_field = my_field - def __post_init__(self): noradeg_flag = False try: self.my_field.ra diff --git a/asteca/modules/likelihood_priv.py b/asteca/modules/likelihood_priv.py index e7c1929e..7a935870 100644 --- a/asteca/modules/likelihood_priv.py +++ b/asteca/modules/likelihood_priv.py @@ -155,163 +155,163 @@ def tremmel(self, synth_clust): return tremmel_lkl -def visual(cluster_dict, synth_clust): - # If synthetic cluster is empty, assign a small likelihood value. - if not synth_clust.any(): - return -1.0e09 - - mag_o, colors_o = cluster_dict["mag"], cluster_dict["colors"] - mag_s, colors_s = synth_clust[0], synth_clust[1:] - - N_mag, N_col = 15, 10 - mag = list(mag_o) + list(mag_s) - col = list(colors_o[0]) + list(colors_s[0]) - mag_min, mag_max = np.nanmin(mag), np.nanmax(mag) - bin_edges = [np.linspace(mag_min, mag_max, N_mag)] - col_min, col_max = np.nanmin(col), np.nanmax(col) - bin_edges.append(np.linspace(col_min, col_max, N_col)) - - # Obtain histogram for observed cluster. 
- cl_histo_f = [] - for i, col_o in enumerate(colors_o): - hess_diag = np.histogram2d(mag_o, col_o, bins=bin_edges)[0] - # Flatten array - cl_histo_f += list(hess_diag.ravel()) - cl_histo_f = np.array(cl_histo_f) - # Down sample histogram - # msk = cl_histo_f > 5 - # cl_histo_f[msk] = 5 - - syn_histo_f = [] - for i, col_s in enumerate(colors_s): - hess_diag = np.histogram2d(mag_s, col_s, bins=bin_edges)[0] - # Flatten array - syn_histo_f += list(hess_diag.ravel()) - syn_histo_f = np.array(syn_histo_f) - # Down sample histogram - # msk = syn_histo_f > 5 - # syn_histo_f[msk] = 5 - - # return -sum(abs(cluster_dict["hist_down_samp"]-syn_histo_f)) - - # create a mask for where each data set is non-zero - m1 = cl_histo_f != 0 - m2 = syn_histo_f != 0 - m1_area = m1.sum() - m2_area = m2.sum() - tot_area = m1_area + m2_area - # use a logical and to create a combined map where both datasets are non-zero - ovrlp_area = np.logical_and(m1, m2).sum() - - # # calculate the overlapping density, where 0.5 is the bin width - # ol_density = np.abs((cl_histo_f - syn_histo_f) * 0.5)[ol] - # # calculate the total overlap percent - # h_overlap = ol_density.sum() * 100 - - h_overlap = ovrlp_area / tot_area - - # ol_density = np.abs((cl_histo_f - cl_histo_f) * 0.5)[ol] - # h_overlap_0 = ol_density.sum() * 100 - # print(h_overlap_0) - # breakpoint() - - # # cl_histo_f = cluster_dict["hist_down_samp"] - # mi_cnst = np.clip(cl_histo_f, a_min=0, a_max=1) - # # Final chi. - # mig_chi = np.sum((cl_histo_f + mi_cnst - syn_histo_f)**2 / (cl_histo_f + 1.)) - # mig_chi_0 = np.sum((cl_histo_f + mi_cnst)**2 / (cl_histo_f + 1.)) - # mig_chi_opt = np.sum((cl_histo_f + mi_cnst - cl_histo_f)**2 / (cl_histo_f + 1.)) - # print(mig_chi_opt, mig_chi_0, mig_chi) - - # import matplotlib.pyplot as plt - # # plt.subplot(121) - # y_edges, x_edges = bin_edges #cluster_dict['bin_edges'] - # for xe in x_edges: - # plt.axvline(xe, c="grey", ls=":") - # for ye in y_edges: - # plt.axhline(ye, c="grey", ls=":") - # plt.title(h_overlap) - # plt.scatter(colors_o[0], mag_o, alpha=.5) - # plt.scatter(colors_s[0], mag_s, alpha=.5) - # plt.gca().invert_yaxis() - - # # plt.subplot(122) - # # plt.title(h_overlap) - # # plt.bar(np.arange(len(cl_histo_f)), cl_histo_f, label='obs', alpha=.5) - # # plt.bar(np.arange(len(syn_histo_f)), syn_histo_f, label='syn', alpha=.5) - # # plt.legend() - # plt.show() - - return h_overlap - - -def mean_dist(cluster_dict, synth_clust): - # If synthetic cluster is empty, assign a small likelihood value. - if not synth_clust.any(): - return -1.0e09 - - # mag_o, colors_o = cluster_dict['mag'], cluster_dict['colors'] - mag0, colors0 = cluster_dict["mag0"], cluster_dict["colors0"] - mag_s, colors_s = synth_clust[0], synth_clust[1:] - - dist = (np.median(mag0) - np.median(mag_s)) ** 2 + ( - np.median(colors0) - np.median(colors_s) - ) ** 2 - return -dist - - if len(mag_s) < 5: - return -1.0e09 - import ndtest - - P_val = ndtest.ks2d2s(mag0, colors0, mag_s, colors_s[0]) - - # import matplotlib.pyplot as plt - # plt.title(P_val) - # plt.scatter(colors_o0, mag_o, alpha=.5) - # plt.scatter(colors_s[0], mag_s, alpha=.5) - # plt.gca().invert_yaxis() - # plt.show() - - return P_val - - -def bins_distance(cluster_dict, synth_clust): +# def visual(cluster_dict, synth_clust): +# # If synthetic cluster is empty, assign a small likelihood value. 
+# if not synth_clust.any(): +# return -1.0e09 + +# mag_o, colors_o = cluster_dict["mag"], cluster_dict["colors"] +# mag_s, colors_s = synth_clust[0], synth_clust[1:] + +# N_mag, N_col = 15, 10 +# mag = list(mag_o) + list(mag_s) +# col = list(colors_o[0]) + list(colors_s[0]) +# mag_min, mag_max = np.nanmin(mag), np.nanmax(mag) +# bin_edges = [np.linspace(mag_min, mag_max, N_mag)] +# col_min, col_max = np.nanmin(col), np.nanmax(col) +# bin_edges.append(np.linspace(col_min, col_max, N_col)) + +# # Obtain histogram for observed cluster. +# cl_histo_f = [] +# for i, col_o in enumerate(colors_o): +# hess_diag = np.histogram2d(mag_o, col_o, bins=bin_edges)[0] +# # Flatten array +# cl_histo_f += list(hess_diag.ravel()) +# cl_histo_f = np.array(cl_histo_f) +# # Down sample histogram +# # msk = cl_histo_f > 5 +# # cl_histo_f[msk] = 5 + +# syn_histo_f = [] +# for i, col_s in enumerate(colors_s): +# hess_diag = np.histogram2d(mag_s, col_s, bins=bin_edges)[0] +# # Flatten array +# syn_histo_f += list(hess_diag.ravel()) +# syn_histo_f = np.array(syn_histo_f) +# # Down sample histogram +# # msk = syn_histo_f > 5 +# # syn_histo_f[msk] = 5 + +# # return -sum(abs(cluster_dict["hist_down_samp"]-syn_histo_f)) + +# # create a mask for where each data set is non-zero +# m1 = cl_histo_f != 0 +# m2 = syn_histo_f != 0 +# m1_area = m1.sum() +# m2_area = m2.sum() +# tot_area = m1_area + m2_area +# # use a logical and to create a combined map where both datasets are non-zero +# ovrlp_area = np.logical_and(m1, m2).sum() + +# # # calculate the overlapping density, where 0.5 is the bin width +# # ol_density = np.abs((cl_histo_f - syn_histo_f) * 0.5)[ol] +# # # calculate the total overlap percent +# # h_overlap = ol_density.sum() * 100 + +# h_overlap = ovrlp_area / tot_area + +# # ol_density = np.abs((cl_histo_f - cl_histo_f) * 0.5)[ol] +# # h_overlap_0 = ol_density.sum() * 100 +# # print(h_overlap_0) +# # breakpoint() + +# # # cl_histo_f = cluster_dict["hist_down_samp"] +# # mi_cnst = np.clip(cl_histo_f, a_min=0, a_max=1) +# # # Final chi. +# # mig_chi = np.sum((cl_histo_f + mi_cnst - syn_histo_f)**2 / (cl_histo_f + 1.)) +# # mig_chi_0 = np.sum((cl_histo_f + mi_cnst)**2 / (cl_histo_f + 1.)) +# # mig_chi_opt = np.sum((cl_histo_f + mi_cnst - cl_histo_f)**2 / (cl_histo_f + 1.)) +# # print(mig_chi_opt, mig_chi_0, mig_chi) + +# # import matplotlib.pyplot as plt +# # # plt.subplot(121) +# # y_edges, x_edges = bin_edges #cluster_dict['bin_edges'] +# # for xe in x_edges: +# # plt.axvline(xe, c="grey", ls=":") +# # for ye in y_edges: +# # plt.axhline(ye, c="grey", ls=":") +# # plt.title(h_overlap) +# # plt.scatter(colors_o[0], mag_o, alpha=.5) +# # plt.scatter(colors_s[0], mag_s, alpha=.5) +# # plt.gca().invert_yaxis() + +# # # plt.subplot(122) +# # # plt.title(h_overlap) +# # # plt.bar(np.arange(len(cl_histo_f)), cl_histo_f, label='obs', alpha=.5) +# # # plt.bar(np.arange(len(syn_histo_f)), syn_histo_f, label='syn', alpha=.5) +# # # plt.legend() +# # plt.show() + +# return h_overlap + + +# def mean_dist(cluster_dict, synth_clust): +# # If synthetic cluster is empty, assign a small likelihood value. 
+# if not synth_clust.any(): +# return -1.0e09 + +# # mag_o, colors_o = cluster_dict['mag'], cluster_dict['colors'] +# mag0, colors0 = cluster_dict["mag0"], cluster_dict["colors0"] +# mag_s, colors_s = synth_clust[0], synth_clust[1:] + +# dist = (np.median(mag0) - np.median(mag_s)) ** 2 + ( +# np.median(colors0) - np.median(colors_s) +# ) ** 2 +# return -dist + +# if len(mag_s) < 5: +# return -1.0e09 +# import ndtest + +# P_val = ndtest.ks2d2s(mag0, colors0, mag_s, colors_s[0]) + +# # import matplotlib.pyplot as plt +# # plt.title(P_val) +# # plt.scatter(colors_o0, mag_o, alpha=.5) +# # plt.scatter(colors_s[0], mag_s, alpha=.5) +# # plt.gca().invert_yaxis() +# # plt.show() + +# return P_val + + +def bins_distance(self, synth_clust): """Sum of distances to corresponding bins in he Hess diagram.""" if not synth_clust.any(): return 1.0e09 - mag0, colors0 = cluster_dict["mag"], cluster_dict["colors"][0] - msk = np.isnan(mag0) | np.isnan(colors0) - mag0, colors0 = mag0[~msk], colors0[~msk] + mag_o, colors_o = self.my_cluster.mag_p, self.my_cluster.colors_p[0] mag_s, colors_s = synth_clust[0], synth_clust[1] - mpercs = (0.1, 0.5, 1, 2, 5, 10, 30, 60, 90) - cpercs = (0.5, 1, 5, 15, 30, 60, 90) + mpercs = (0.5, 10, 20, 30, 40, 50, 60, 70, 80, 90) + cpercs = (0.5, 10, 20, 30, 40, 50, 60, 70, 75, 80, 85, 90, 95) - perc_mag0 = np.percentile(mag0, mpercs) - perc_colors0 = np.percentile(colors0, cpercs) - pts_0 = [] - for pm in perc_mag0: - for pc in perc_colors0: - pts_0.append([pm, pc]) - pts_0 = np.array(pts_0).T + perc_mag_o = np.nanpercentile(mag_o, mpercs) + perc_colors_o = np.nanpercentile(colors_o, cpercs) + pts_o = [] + for pm in perc_mag_o: + for pc in perc_colors_o: + pts_o.append([pm, pc]) + pts_o = np.array(pts_o).T - # import matplotlib.pyplot as plt - # plt.scatter(colors0, mag0) - # plt.scatter(pts_0[1], pts_0[0], c='k') - # plt.gca().invert_yaxis() - # plt.show() - # breakpoint() - - perc_mag_s = np.percentile(mag_s, mpercs) - perc_colors_s = np.percentile(colors_s, cpercs) + perc_mag_s = np.nanpercentile(mag_s, mpercs) + perc_colors_s = np.nanpercentile(colors_s, cpercs) pts_s = [] for pm in perc_mag_s: for pc in perc_colors_s: pts_s.append([pm, pc]) pts_s = np.array(pts_s).T - dist = np.sqrt((pts_s[0] - pts_0[0]) ** 2 + (pts_s[1] - pts_0[1]) ** 2) + # import matplotlib.pyplot as plt + # plt.scatter(colors_o, mag_o, alpha=.25, c='r') + # plt.scatter(colors_s, mag_s, alpha=.25, c='b') + # plt.scatter(pts_o[1], pts_o[0], c='r') + # plt.scatter(pts_s[1], pts_s[0], c='b') + # plt.gca().invert_yaxis() + # plt.show() + + # dist = np.sqrt((pts_s[0] - pts_o[0]) ** 2 + (pts_s[1] - pts_o[1]) ** 2) + dist = (pts_s[0] - pts_o[0]) ** 2 + (pts_s[1] - pts_o[1]) ** 2 weights = np.linspace(1, 0.05, len(mpercs) * len(cpercs)) lkl = sum(dist * weights) diff --git a/asteca/modules/mass_binary.py b/asteca/modules/mass_binary.py index 7140b2be..1a7699b6 100644 --- a/asteca/modules/mass_binary.py +++ b/asteca/modules/mass_binary.py @@ -51,7 +51,7 @@ def get_bpr(self, isoch: np.array, idxs: np.array): # Assign secondary (synthetic) masses to each observed star m2_obs = mass_2[idxs] - # Single systems are identified with m2=np.nan. This mask identifies observed + # Single systems are identified with m2=np.nan. 
This mask points to observed # stars identified as binary systems m2_msk = ~np.isnan(m2_obs) diff --git a/asteca/modules/synth_cluster_priv.py b/asteca/modules/synth_cluster_priv.py index a6b612fe..1c5eee4c 100644 --- a/asteca/modules/synth_cluster_priv.py +++ b/asteca/modules/synth_cluster_priv.py @@ -1,161 +1,39 @@ import numpy as np from scipy import stats -from scipy.optimize import curve_fit from .imfs import invTrnsfSmpl, sampleInv -def error_distribution(self, mag, e_mag, e_colors): +def error_distribution(mag, e_mag, e_colors, rand_norm_vals): """ - Fit an exponential function to the errors in each photometric dimension, - using the main magnitude as the x coordinate. + Extract the magnitude and color(s) uncertainties to use as error model for the + synthetic clusters. """ - # Mask of not nan values across arrays - nan_msk = np.isnan(mag) | np.isnan(e_mag) - for e_col in e_colors: - nan_msk = nan_msk | np.isnan(e_col) - not_nan_msk = ~nan_msk - # Remove nan values - mag, e_mag = mag[not_nan_msk], e_mag[not_nan_msk] - e_col_non_nan = [] - for e_col in e_colors: - e_col_non_nan.append(e_col[not_nan_msk]) - e_colors = e_col_non_nan - # Left end of magnitude range - if mag.max() - mag.min() > 2: - be_m = max(min(mag) + 1, np.percentile(mag, 0.5)) - else: - be_m = np.percentile(mag, 0.5) - # Width of the intervals in magnitude. - interv_mag = 0.5 - # Number of intervals, three minimum - while True: - delta_mag = mag.max() - be_m - n_interv = int(round(delta_mag / interv_mag)) - if n_interv > 3: - break - interv_mag -= 0.05 - if interv_mag <= 0.1: - break - # - # Not sure why I was using '0.5 * interv_mag', removed 20/06/24 - # steps_x = np.linspace(be_m - 0.5 * interv_mag, mag.max(), n_interv - 1) - steps_x = np.linspace(be_m - interv_mag, mag.max(), n_interv - 1) - - # Median values for each error array in each magnitude range - mag_y = [] - for i, e_mc in enumerate([e_mag] + [list(_) for _ in e_colors]): - x1 = steps_x[0] - e_mc_medians = [] - for x2 in steps_x[1:]: - msk = (mag >= x1) & (mag < x2) - strs_in_range = np.array(e_mc)[msk] - if len(strs_in_range) > 1: - e_mc_medians.append(np.median(strs_in_range)) - else: - # If no stars in interval, use small value - e_mc_medians.append(0.001) - x1 = x2 - mag_y.append(e_mc_medians) - - # Make sure that median error values increase with increasing magnitude. This - # ensures that the 3P exponential fit does not fail - mag_y_new = [] - for e_arr in mag_y: - e_arr_new, v_old = [], np.inf - for i in range(-1, -len(e_arr) - 1, -1): - if e_arr[i] > v_old: - e_arr_new.append(v_old) - else: - e_arr_new.append(e_arr[i]) - v_old = e_arr[i] - e_arr_new.reverse() - mag_y_new.append(e_arr_new) - mag_y = mag_y_new - - # Mid points in magnitude range - mag_x = 0.5 * (steps_x[:-1] + steps_x[1:]) - - # Fit 3-parameter exponential - err_dist = [] - for y in mag_y: - popt_mc = get_3p_pars(mag_x, y, mag) - err_dist.append(popt_mc) + def filnans(data): + msk = np.isnan(data) + data[msk] = np.interp(np.flatnonzero(msk), np.flatnonzero(~msk), data[~msk]) + return data + + # Replace nan values with interpolated values + mag = filnans(mag) + e_mag = filnans(e_mag) + e_colors = [filnans(_) for _ in e_colors] + + # The minus generates reversed sorting in magnitude. 
This is important so that + # synthetic clusters with a smaller magnitude range are assigned uncertainties + # from the bottom up (i.e.: from the largest to the smallest magnitudes) + idx = np.argsort(-mag) + + # Multiplying by a random normal float centered at 0 with STDDEV=1 generates the + # uncertainty values ready to be added to the synthetic photometry. + N = len(mag) + err_dist = [rand_norm_vals[:N] * e_mag[idx]] + for e_col in e_colors: + err_dist.append(rand_norm_vals[:N] * e_col[idx]) return err_dist -def exp_3p(x, a, b, c): - """ - Three-parameters exponential function. - - This function is tied to the 'synth_cluster.add_errors' function. - """ - return a * np.exp(b * x) + c - - -def exp_2p(x, a, b): - """Two-parameters exponential function. Required for scipy.optimize.curve_fit.""" - return a * np.exp(b * x) - - -def get_3p_pars(mag_x, y, mags): - """Fit 3P or 2P exponential curve.""" - try: - if len(mag_x) >= 3: - # Fit 3-param exponential curve. - popt_mc, _ = curve_fit(exp_3p, mag_x, y) - else: - # If the length of this list is 2, it means that the main - # magnitude length is too small. If this is the case, do not - # attempt to fit a 3 parameter exp function since it will fail. - raise RuntimeError - - # Check a, b values - if popt_mc[0] > 1e5 or popt_mc[1] <= 0: - raise RuntimeError - - # If the 3-param exponential fitting process fails. - except RuntimeError: - print("Error: 3-param exponential error function fit failed. Attempt 2P fit") - try: - # Fit simple 2-params exponential curve. - popt_mc, _ = curve_fit(exp_2p, mag_x, y) - # Insert empty 'c' value to be fed later on to the 3P exponential - # function used to obtain the plotted error bars. This makes the - # 2P exp function equivalent to the 3P exp function, with the - # 'c' parameter equal to 0. - popt_mc = np.insert(popt_mc, 2, 0.0) - - # Check a, b values - if popt_mc[0] > 1e5 or popt_mc[1] <= 0: - raise RuntimeError - - # If the 2-param exponential fitting process also fails, try with a - # 2P exp but using only min and max error values. - except RuntimeError: - print( - "Error: 2-param exponential error function fit failed. Perform " - + "min-max magnitude fit." - ) - # Fit simple 2-params exponential curve. - mags_minmax = [min(mags), max(mags) - (max(mags) - min(mags)) / 20.0] - y_minmax = [min(y), max(y)] - - # OLD code, replaced by manual estimation 20/06/24 - # popt_mc, _ = curve_fit(exp_2p, mags_minmax, y_minmax) - # # Insert 'c' value into exponential function param list. - # popt_mc = np.insert(popt_mc, 2, 0.0) - # Manual coefficients - x0, x1 = mags_minmax - y0, y1 = y_minmax - b = np.log(y0 / y1) / (x0 - x1) - a = y0 / np.exp(b * x0) - popt_mc = np.array([a, b, 0]) - - return popt_mc - - def add_binarity(self) -> np.ndarray: """For each theoretical isochrone defined. 1. Draw random secondary masses for *all* stars. @@ -870,7 +748,7 @@ def mass_interp(isoch_cut, m_ini_idx, st_dist_mass, N_obs_stars): Masses that fall outside of the isochrone's mass range have been previously rejected. 
""" - # Assumes `mass_ini=isoch_cut[m_ini_idx]` is ordered + # Assumes `mass_ini=isoch_cut[m_ini_idx]` is sorted min to max <-- IMPORTANT mass_ini = isoch_cut[m_ini_idx] # Filter masses in the IMF sampling that are outside of the mass @@ -905,6 +783,7 @@ def mass_interp(isoch_cut, m_ini_idx, st_dist_mass, N_obs_stars): # for i, arr in enumerate(isoch_cut): # isoch_mass[i] = np.interp(mass_dist, mass_ini, arr) + # Interpolate the sampled stars (masses) into the isochrone isoch_mass = interp_mass_isoch(isoch_cut, mass_ini, mass_dist) return isoch_mass @@ -977,110 +856,17 @@ def binarity(alpha, beta, binar_flag, m_ini_idx, rand_unif_vals, isoch_mass): return isoch_mass -def add_errors(isoch_compl, err_dist, rand_norm_vals): +def add_errors(isoch_binar, err_dist): """ Add random synthetic uncertainties to the magnitude and color(s) """ - rnd = rand_norm_vals[: isoch_compl.shape[-1]] - main_mag = isoch_compl[0] - - for i, (a, b, c) in enumerate(err_dist): - # isoch_compl[0] is the main magnitude. - # sigma_mc = getSigmas(isoch_compl[0], popt_mc) - - # Three-parameters exponential function for the uncertainty - # sigma_mc = a * np.exp(b * main_mag) + c - sigma_mc = exp_3p(main_mag, a, b, c) - - # Randomly move stars around these errors. - # isoch_compl[i] = gauss_error(rnd, isoch_compl[i], sigma_mc) - - # Randomly perturb photometry with a Gaussian distribution - isoch_compl[i] += rnd * sigma_mc + N = len(isoch_binar[0]) + mag_sort = np.argsort(-isoch_binar[0]) + for i, sigma in enumerate(err_dist): + isoch_binar[i][mag_sort] += sigma[:N] - return isoch_compl - - -# def generate(self, fit_params, plotflag=False): -# r"""Returns the full synthetic cluster array. - -# This is an almost exact copy of the ``synth_cluster.generate()`` function with -# the only difference that it returns the full array. This is generated: - -# synth_clust = [mag, c1, (c2), m_ini_1, mag_b, c1_b, (c2_b), m_ini_2] - -# where c1 and c2 colors defined, and 'm_ini_1, m_ini_2' are the primary and -# secondary masses of the binary systems. The single systems only have a '0' -# stored in 'm_ini_2'. - -# """ -# # Return proper values for fixed parameters and parameters required -# # for the (z, log(age)) isochrone averaging. -# met, loga, alpha, beta, av, dr, rv, dm, ml, mh, al, ah = properModel( -# self.met_age_dict, self.fix_params, fit_params -# ) - -# # Generate a weighted average isochrone from the (z, log(age)) values in -# # the 'model'. -# isochrone = zaWAverage( -# self.theor_tracks, -# self.met_age_dict, -# self.m_ini_idx, -# met, -# loga, -# ml, -# mh, -# al, -# ah, -# ) - -# # Move theoretical isochrone using the distance modulus -# isoch_moved = move_isochrone(isochrone, self.m_ini_idx, dm) - -# # Apply extinction correction -# isoch_extin = extinction( -# self.ext_law, -# self.ext_coefs, -# self.rand_floats["norm"][0], -# self.rand_floats["unif"][0], -# self.DR_distribution, -# self.m_ini_idx, -# self.binar_flag, -# av, -# dr, -# rv, -# isoch_moved, -# ) - -# # Remove isochrone stars beyond the maximum magnitude -# isoch_cut = cut_max_mag(isoch_extin, self.max_mag_syn) -# if not isoch_cut.any(): -# return np.array([]) -# if plotflag: -# return isoch_cut - -# # Interpolate IMF's sampled masses into the isochrone. -# isoch_mass = mass_interp( -# isoch_cut, self.m_ini_idx, self.st_dist_mass[ml][al], self.N_obs_stars -# ) -# if not isoch_mass.any(): -# return np.array([]) - -# # Assignment of binarity. 
-# isoch_binar = binarity( -# alpha, -# beta, -# self.binar_flag, -# self.m_ini_idx, -# self.rand_floats["unif"][1], -# isoch_mass, -# ) - -# # Assign errors according to errors distribution. -# synth_clust = add_errors(isoch_binar, self.err_dist, self.rand_floats["norm"][1]) - -# return synth_clust + return isoch_binar # def _rm_low_masses(self, dm_min): diff --git a/asteca/synthetic.py b/asteca/synthetic.py index 29edc31c..4f2a717c 100644 --- a/asteca/synthetic.py +++ b/asteca/synthetic.py @@ -1,5 +1,4 @@ import warnings -from dataclasses import dataclass import numpy as np import pandas as pd from .cluster import Cluster @@ -8,7 +7,6 @@ from .modules import mass_binary as mb -@dataclass class Synthetic: """Define a :py:class:`Synthetic` object. @@ -23,39 +21,53 @@ class Synthetic: :param isochs: :py:class:`Isochrones ` object with the loaded files for the theoretical isochrones - :type isochs: :py:class:`Isochrones ` - :param ext_law: Extinction law. if ``GAIADR3`` is selected, the magnitude and first + :type isochs: Isochrones + :param ext_law: Extinction law, one of ``CCMO, GAIADR3``. + If ``GAIADR3`` is selected, the magnitude and first color defined in the :py:class:`Isochrones ` and :py:class:`Cluster ` objects are assumed to be Gaia's (E)DR3 **G** and **(BP-RP)** respectively. The second color (if defined) will always be affected by the ``CCMO`` model, defaults to ``CCMO`` - :type ext_law: str: ``CCMO, GAIADR3`` + :type ext_law: str :param DR_distribution: Distribution function for the differential reddening, - defaults to ``uniform`` - :type DR_distribution: str: ``uniform, normal`` + one of ``uniform, normal``; defaults to ``uniform`` + :type DR_distribution: str :param IMF_name: Name of the initial mass function used to populate the isochrones, + one of ``salpeter_1955, kroupa_2001, chabrier_2014``; defaults to ``chabrier_2014`` - :type IMF_name: str: ``salpeter_1955, kroupa_2001, chabrier_2014`` + :type IMF_name: str :param max_mass: Maximum total initial mass. Should be large enough to allow generating as many synthetic stars as observed stars, defaults to ``100_000`` :type max_mass: int :param gamma: Distribution function for the mass ratio of the binary systems, + float or one of ``D&K, fisher_stepped, fisher_peaked, raghavan``; defaults to ``D&K`` - :type gamma: str: ``D&K, fisher_stepped, fisher_peaked, raghavan``, float + :type gamma: float | str :param seed: Random seed. If ``None`` a random integer will be generated and used, defaults to ``None`` :type seed: int | None + + :raises ValueError: If any of the attributes is not recognized as a valid option """ - isochs: Isochrones - ext_law: str = "CCMO" - DR_distribution: str = "uniform" - IMF_name: str = "chabrier_2014" - max_mass: int = 100_000 - gamma: float | str = "D&K" - seed: int | None = None + def __init__( + self, + isochs: Isochrones, + ext_law: str = "CCMO", + DR_distribution: str = "uniform", + IMF_name: str = "chabrier_2014", + max_mass: int = 100_000, + gamma: float | str = "D&K", + seed: int | None = None, + ) -> None: + self.isochs = isochs + self.ext_law = ext_law + self.DR_distribution = DR_distribution + self.IMF_name = IMF_name + self.max_mass = max_mass + self.gamma = gamma + self.seed = seed - def __post_init__(self): if self.seed is None: self.seed = np.random.randint(100000) @@ -168,18 +180,25 @@ def calibrate(self, cluster: Cluster, fix_params: dict = {}): + "was defined in 'synthetic'." 
            )

-        # Used by the mass and binary probability estimation
-        self.mag_p = cluster.mag_p
-        self.colors_p = cluster.colors_p
-        # Data used by the `generate()` method
+        self.m_ini_idx = 2  # (0->mag, 1->color, 2->mass_ini)
+        if self.isochs.color2_effl is not None:
+            self.m_ini_idx = 3  # (0->mag, 1->color, 2->color2, 3->mass_ini)
+        self.max_mag_syn = max(cluster.mag_p)
         self.N_obs_stars = len(cluster.mag_p)
-        self.m_ini_idx = len(cluster.colors_p) + 1
         self.err_dist = scp.error_distribution(
-            self, cluster.mag_p, cluster.e_mag_p, cluster.e_colors_p
+            cluster.mag_p,
+            cluster.e_mag_p,
+            cluster.e_colors_p,
+            self.rand_floats["norm"][1],
         )
+        # Used by the `get_models()` method and its result by the `stellar_masses()`
+        # and `binary_fraction()` methods
+        self.mag_p = cluster.mag_p
+        self.colors_p = cluster.colors_p
+
         self.fix_params = fix_params
         self.binar_flag = True
@@ -293,9 +312,7 @@ def generate(
         )
         # Assign errors according to errors distribution.
-        synth_clust = scp.add_errors(
-            isoch_binar, self.err_dist, self.rand_floats["norm"][1]
-        )
+        synth_clust = scp.add_errors(isoch_binar, self.err_dist)
         if full_arr_flag:
             return synth_clust
@@ -362,8 +379,14 @@ def get_models(
         self.R_xy = R_xy
         print("")
-        print("Model :", ", ".join(f"{k}: {v}" for k, v in model.items()))
-        print("Model STDDEV :", ", ".join(f"{k}: {v}" for k, v in model_std.items()))
+        print(
+            "Model :",
+            ", ".join(f"{k}: {round(v, 3)}" for k, v in model.items()),
+        )
+        print(
+            "Model STDDEV :",
+            ", ".join(f"{k}: {round(v, 3)}" for k, v in model_std.items()),
+        )
         print(f"N_models : {N_models}")
         print("Attributes stored in Synthetic object")
@@ -424,8 +447,7 @@ def stellar_masses(
     def binary_fraction(
         self,
     ) -> np.array:
-        """Estimate individual masses for the observed stars, along with their binary
-        probabilities (if binarity was estimated).
+        """Estimate the total binary fraction for the observed cluster.

         :return: Distribution of total binary fraction values for the cluster
         :rtype: np.array
diff --git a/docs/CHANGELOG.rst b/docs/CHANGELOG.rst
index bdf03f4e..0935d36d 100644
--- a/docs/CHANGELOG.rst
+++ b/docs/CHANGELOG.rst
@@ -1,5 +1,13 @@
 .. :changelog:

+`[v0.5.6] `__ - 2024-06-25
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+- Simpler error distribution function
+- Convert `dataclass` to `class` (simpler API documentation)
+
+
+
 `[v0.5.5] `__ - 2024-06-21
 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
diff --git a/docs/basic/isochrones_load.rst b/docs/basic/isochrones_load.rst
index ba6910af..adc4a288 100644
--- a/docs/basic/isochrones_load.rst
+++ b/docs/basic/isochrones_load.rst
@@ -19,6 +19,8 @@ produced with these services will contain:
 * BASTI : single metallicity and single age

+.. _isoch_loading:
+
 Loading the files
 *****************
@@ -65,8 +67,41 @@ the documentation for the `Filter Profile Service `_
 of the Spanish Virtual Observatory.

-See :py:mod:`asteca.isochrones` for more information on the arguments of the
-:class:`isochrones` class.
+There are a few more optional arguments that can be used when loading the isochrones.
+The user can refer to :py:mod:`asteca.isochrones` for more information on the arguments
+of the :class:`isochrones` class.
+
+One of those extra arguments is ``z_to_FeH``, used to transform metallicity values from
+the default ``z`` to the logarithmic version ``FeH`` (set to ``None`` by default).
+If you want to generate your synthetic cluster models using
+``FeH`` instead of ``z``, then this argument must be changed to the solar ``z``
+metallicity value for the isochrones.
+For example, if you are using PARSEC isochrones, a solar metallicity of
+``z=0.0152`` is recommended (see
+`CMD input form `_), which means that
+you would load your isochrones as:
+
+.. code-block:: python
+
+    isochs = asteca.isochrones(
+        model="PARSEC",
+        isochs_path="isochrones/",
+        magnitude="Gmag",
+        color=("G_BPmag", "G_RPmag"),
+        magnitude_effl=6390.7,
+        color_effl=(5182.58, 7825.08),
+        z_to_FeH=0.0152
+    )
+
+If this argument is not changed from its default then the ``z`` parameter will be used
+to generate synthetic clusters, as shown in the section :ref:`ref_generating`.
+
+Another extra argument is ``N_interp``, which controls the isochrones interpolation
+(set to ``2500`` by default). A smaller value can be used to lower the amount of memory
+used by this class, but it comes at the expense of coarser synthetic clusters being
+generated later on (since the isochrones will be interpolated with fewer points and will
+thus contain less resolution).
+
 Please `contact me `_ if you have any issues with the
 loading process of the theoretical isochrones.
diff --git a/docs/basic/synthetic_clusters.rst b/docs/basic/synthetic_clusters.rst
index 3db2acfd..002fe448 100644
--- a/docs/basic/synthetic_clusters.rst
+++ b/docs/basic/synthetic_clusters.rst
@@ -150,11 +150,11 @@ distribution.
 Calibrating the object
 **********************

-After instantiating a ``synthcl`` object through a :class:`synthetic` class (using an
-:class:`isochrones` object and the required initial arguments: IMF, ``gamma``, etc), we
-need to calibrate it with our observed cluster. This process collects required data from
-the :class:`cluster` object (defined as ``my_cluster`` in :ref:`cluster_load`), as well
-as reading the fixed fundamental parameters (if any), and some initialization arguments.
+After instantiating a ``synthcl`` object through a :py:class:`asteca.synthetic.Synthetic` class (using an :py:class:`asteca.isochrones.Isochrones` object and the required initial arguments: IMF, ``gamma``, etc), we need to calibrate it with our observed cluster.
+This process collects required data from
+the :py:class:`asteca.cluster.Cluster` object (defined as ``my_cluster`` in
+:ref:`cluster_load`), as well as reading the fixed fundamental parameters (if any), and some initialization arguments.
+
 The basic configuration looks like this:

 .. code-block:: python
@@ -171,31 +171,22 @@ mention here that the ``fix_params`` dictionary is optional. If you choose not t
 any parameters, then all the fundamental parameters will be expected when calling the
 ``synthcl`` object to generate a synthetic cluster.

-There is one more optional argument that can be used when calibrating the
-``synthcl`` object: ``z_to_FeH``. This argument is used to transform metallicity values
-from he default ``z`` (obtained from the loaded isochrones) to the logarithmic version
-``FeH``, and it is set to ``None`` by default. If you want to fit your synthetic cluster
-models using ``FeH`` instead of ``z``, then this argument must be changed to the solar
-``z`` metallicity value for the isochrones defined in the :class:`isochrones` object.
-For example, if you are using PARSEC isochrones which have a solar metallicity of
-``z=0.0152`` (see `CMD input form `_), then
-you would calibrate the ``synthcl`` object as:
-
-.. 
code-block:: python - - synthcl.calibrate(my_cluster, fix_params, z_to_FeH=0.0152) +The photometric uncertainties in the synthetic clusters are modeled after the observed +photometric uncertainties. The algorithm employed by **ASteCA** is to simply transport +the observed uncertainty values in magnitude and color(s) to the generated synthetic +stars. This way no approximation to the distribution of photometric uncertainties is +required. -If this argument is not changed from its default then the ``z`` parameter will be used -to generate synthetic clusters, as shown in the next section. +.. _ref_generating: Generating synthetic clusters ***************************** Once the calibration is complete, we can generate synthetic clusters by simply passing a dictionary with the fundamental parameters to be fitted to the -:meth:`generate` method of our :class:`synthetic` object. **ASteCA** currently accepts +:py:meth:`asteca.synthetic.Synthetic.generate` method. **ASteCA** currently accepts eight parameters, related to three intrinsic and two extrinsic cluster characteristics: - *Intrinsic*: metallicity (``met``), age (``loga``), and binarity (``alpha, beta``) @@ -211,16 +202,10 @@ Intrinsic parameters ==================== The valid ranges for the metallicity and logarithmic age are inherited from the -theoretical isochrone(s) loaded in the :class:`isochrones` object. The minimum and -maximum stored values for these parameters can be obtained calling the :meth:`min_max` -method of our :class:`synthcl` object: - -.. code-block:: python - - met_min, met_max, loga_min, loga_max = synthcl.min_max() +theoretical isochrone(s) loaded in the :py:class:`asteca.isochrones.Isochrones` object. The metallicity, ``met``, can be modeled either as ``z`` or ``FeH`` as -explained in the previous section. The age parameter, ``loga``, is modeled as the +explained in section :ref:`isoch_loading`. The age parameter, ``loga``, is modeled as the logarithmic age. The ``alpha, beta`` parameters determine the fraction of binary systems diff --git a/docs/conf.py b/docs/conf.py index c4cd09e9..33356272 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -47,6 +47,7 @@ "IPython.sphinxext.ipython_console_highlighting", "sphinx_math_dollar", "sphinx.ext.mathjax", + # "sphinx.ext.doctest" ] autodoc2_packages = [ @@ -61,10 +62,37 @@ autodoc2_hidden_objects = ["dunder", "private", "inherited"] +# DEPRECATED 24/06/24 ################################################################ # This block removes the attributes from the apidocs .rst files. # I could not find a simpler way to do this 26/05/24 # https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx-core-events +# def source_read_handler(app, docname, source): +# """'docname, source' not used but required""" +# path = "./apidocs/asteca/" +# for file in os.listdir(path): +# with open(path + file) as f: +# lines = f.readlines() +# idxs = [] +# for i, line in enumerate(lines): +# if ".. py:attribute:" in line: +# idxs += list(range(i, i + 6)) +# for i in range(len(lines), 0, -1): +# if i in idxs: +# del lines[i] +# with open(path + file, "w") as f: +# for line in lines: +# f.write(line) + + +# def setup(app): +# app.connect("source-read", source_read_handler) +################################################################ + + +################################################################ +# This block changes the 'Initialization' title for a 'Methods' title +# WARNING: could break at any moment if pydoclint changes its behavior! 
def source_read_handler(app, docname, source): """'docname, source' not used but required""" path = "./apidocs/asteca/" @@ -73,14 +101,19 @@ def source_read_handler(app, docname, source): lines = f.readlines() idxs = [] for i, line in enumerate(lines): - if ".. py:attribute:" in line: - idxs += list(range(i, i + 6)) - for i in range(len(lines), 0, -1): - if i in idxs: - del lines[i] - with open(path + file, "w") as f: - for line in lines: - f.write(line) + if ".. rubric:: Initialization" in line: + idxs += list(range(i, i + 3)) + break + if idxs: + if len(lines) > idxs[-1] + 2: + lines[idxs[0]] = " .. rubric:: Methods\n" + else: + lines[idxs[0]] = "" + lines[idxs[1]] = "" + lines[idxs[2]] = "" + with open(path + file, "w") as f: + for line in lines: + f.write(line) def setup(app): diff --git a/docs/index.rst b/docs/index.rst index d056e5fd..2bb42e7d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -15,7 +15,7 @@ extinction, distance, metallicity, age, binarity, mass, etc.. .. important:: - Version |ProjectVersion| released on the 21st of June, 2024. See :ref:`changelog` + Version |ProjectVersion| released on the 25th of June, 2024. See :ref:`changelog` for a detailed list of the changes implemented. diff --git a/pyproject.toml b/pyproject.toml index 5963f8f1..642cc85c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "asteca" -version = "0.5.5" +version = "0.5.6" description = "Stellar cluster analysis package" authors = ["Gabriel I Perren "] readme = "README.md"
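The error model introduced by this patch (``error_distribution`` plus ``add_errors``) transports the observed uncertainties, sorted from the faintest to the brightest star and scaled by pre-computed Gaussian deviates, onto the synthetic photometry. A minimal, self-contained numpy sketch of that idea follows; the array names and toy numbers are illustrative placeholders, not ASteCA's API::

    import numpy as np

    rng = np.random.default_rng(0)

    # Toy observed photometry: uncertainties grow toward fainter magnitudes
    mag_obs = rng.uniform(12.0, 20.0, 500)
    e_mag_obs = 0.005 + 0.05 * np.exp(mag_obs - 20.0)

    # Pre-computed Gaussian deviates (the patch stores these in rand_floats["norm"])
    rand_norm = rng.normal(0.0, 1.0, mag_obs.size)

    # Observed uncertainties sorted from faintest to brightest, scaled by the deviates
    idx = np.argsort(-mag_obs)
    err_dist = rand_norm * e_mag_obs[idx]

    # Synthetic photometry: perturb it by transporting those values, also faintest first
    mag_synth = rng.uniform(13.0, 20.0, 300)
    mag_sort = np.argsort(-mag_synth)
    mag_synth[mag_sort] += err_dist[: mag_synth.size]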