Skip to content

Commit

Permalink
Release 0.1.19
Browse files Browse the repository at this point in the history
Merge pull request #65 from PEtab-dev/release_0.1.19
  • Loading branch information
dweindl authored May 28, 2021
2 parents 0917194 + b6c2333 commit 99b2d21
Show file tree
Hide file tree
Showing 17 changed files with 2,024 additions and 45 deletions.
7 changes: 7 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,13 @@

## 0.1 series

### 0.1.19

* Visualization: refactoring (#58) including various bug fixes
* Validation: Fixed detection of missing observable/noise parameter overrides
(#64)
* Optional relative paths in generated YAML (#57)

### 0.1.18

* Fixed various documentation issues
Expand Down
3 changes: 3 additions & 0 deletions doc/example/example_Fujita/visuSpecs/Fujita_visuSpec_3.tsv
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
plotId xOffset yScale
figure_b 100 log
figure_a 500 lin
17 changes: 17 additions & 0 deletions petab/C.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,23 @@
Y_SCALE = 'yScale'
LEGEND_ENTRY = 'legendEntry'

# The only column every PEtab visualization table must provide.
VISUALIZATION_DF_REQUIRED_COLS = [PLOT_ID]

# Columns a visualization table may additionally provide; all have defaults.
VISUALIZATION_DF_OPTIONAL_COLS = [
    PLOT_NAME, PLOT_TYPE_SIMULATION, PLOT_TYPE_DATA, X_VALUES, X_OFFSET,
    X_LABEL, X_SCALE, Y_VALUES, Y_OFFSET, Y_LABEL, Y_SCALE, LEGEND_ENTRY,
    DATASET_ID]

# All recognized visualization table columns (required + optional).
VISUALIZATION_DF_COLS = [
    *VISUALIZATION_DF_REQUIRED_COLS, *VISUALIZATION_DF_OPTIONAL_COLS]

# Columns whose value applies to a whole subplot (shared by all its lines).
VISUALIZATION_DF_SUBPLOT_LEVEL_COLS = [
    PLOT_ID, PLOT_NAME, PLOT_TYPE_SIMULATION, PLOT_TYPE_DATA,
    X_LABEL, X_SCALE, Y_LABEL, Y_SCALE]

# Columns whose value applies to a single line/dataset within a subplot.
VISUALIZATION_DF_SINGLE_PLOT_LEVEL_COLS = [
    X_VALUES, X_OFFSET, Y_VALUES, Y_OFFSET, LEGEND_ENTRY, DATASET_ID]

LINE_PLOT = 'LinePlot'
BAR_PLOT = 'BarPlot'
SCATTER_PLOT = 'ScatterPlot'
Expand Down
8 changes: 4 additions & 4 deletions petab/measurements.py
Original file line number Diff line number Diff line change
Expand Up @@ -289,13 +289,13 @@ def assert_overrides_match_parameter_count(
actual = len(split_parameter_replacement_list(
row.get(OBSERVABLE_PARAMETERS, None)))
# No overrides are also allowed
if actual not in [0, expected]:
if actual != expected:
formula = observable_df.loc[row[OBSERVABLE_ID], OBSERVABLE_FORMULA]
raise AssertionError(
f'Mismatch of observable parameter overrides for '
f'{row[OBSERVABLE_ID]} ({formula})'
f'in:\n{row}\n'
f'Expected 0 or {expected} but got {actual}')
f'Expected {expected} but got {actual}')

# check noise parameters
replacements = split_parameter_replacement_list(
Expand All @@ -304,10 +304,10 @@ def assert_overrides_match_parameter_count(
expected = noise_parameters_count[row[OBSERVABLE_ID]]

# No overrides are also allowed
if not (len(replacements) == 0 or len(replacements) == expected):
if len(replacements) != expected:
raise AssertionError(
f'Mismatch of noise parameter overrides in:\n{row}\n'
f'Expected 0 or {expected} but got {actual}')
f'Expected {expected} but got {actual}')
except KeyError:
# no overrides defined, but a numerical sigma can be provided
# anyways
Expand Down
9 changes: 1 addition & 8 deletions petab/parameter_mapping.py
Original file line number Diff line number Diff line change
Expand Up @@ -360,14 +360,7 @@ def _apply_overrides_for_observable(
"""
for i, override in enumerate(overrides):
overridee_id = f'{override_type}Parameter{i+1}_{observable_id}'
try:
mapping[overridee_id] = override
except KeyError as e:
raise TypeError(f'Cannot override {override_type} parameter '
f'{overridee_id} for observable {observable_id}.'
f'Ensure there exists an {override_type} '
'definition containing the correct number of '
'placeholder parameters.') from e
mapping[overridee_id] = override


def _apply_condition_parameters(par_mapping: ParMappingDict,
Expand Down
9 changes: 7 additions & 2 deletions petab/problem.py
Original file line number Diff line number Diff line change
Expand Up @@ -271,7 +271,8 @@ def to_files(self,
parameter_file: Optional[str] = None,
visualization_file: Optional[str] = None,
observable_file: Optional[str] = None,
yaml_file: Optional[str] = None) -> None:
yaml_file: Optional[str] = None,
relative_paths: bool = True,) -> None:
"""
Write PEtab tables to files for this problem
Expand All @@ -289,6 +290,9 @@ def to_files(self,
visualization_file: Visualization table destination
observable_file: Observables table destination
yaml_file: YAML file destination
relative_paths: whether all paths in the YAML file should be
relative to the location of the YAML file. If `False`, then paths
are left unchanged.
Raises:
ValueError:
Expand Down Expand Up @@ -344,7 +348,8 @@ def error(name: str) -> ValueError:
yaml.create_problem_yaml(sbml_file, condition_file,
measurement_file, parameter_file,
observable_file, yaml_file,
visualization_file)
visualization_file,
relative_paths=relative_paths,)

def get_optimization_parameters(self):
"""
Expand Down
2 changes: 1 addition & 1 deletion petab/version.py
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
"""PEtab library version"""
__version__ = '0.1.18'
__version__ = '0.1.19'
6 changes: 6 additions & 0 deletions petab/visualize/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,10 +10,16 @@
from .plot_data_and_simulation import (plot_data_and_simulation,
plot_petab_problem,
plot_measurements_by_observable,
plot_without_vis_spec,
plot_with_vis_spec,
plot_problem,
save_vis_spec)

__all__ = ["plot_data_and_simulation",
"plot_petab_problem",
"plot_measurements_by_observable",
"plot_without_vis_spec",
"plot_with_vis_spec",
"plot_problem",
"save_vis_spec"
]
111 changes: 107 additions & 4 deletions petab/visualize/helper_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@

from typing import Dict, List, Optional, Tuple, Union

sns.set()
# sns.set() This messes up plotting settings if one just imports this file

# for typehints
IdsList = List[str]
Expand All @@ -42,14 +42,16 @@ def import_from_files(
"""
Helper function for plotting data and simulations, which imports data
from PEtab files. If `visualization_file_path` is not provided, the
visualisation specification DataFrame will be generated automatically.
visualization specification DataFrame will be generated automatically.
For documentation, see main function plot_data_and_simulation()
Returns:
A tuple of experimental data, experimental conditions,
visualization specification and simulation data DataFrames.
"""
warnings.warn("This function will be removed in future releases. ",
DeprecationWarning)

# import measurement data and experimental condition
exp_data = petab.get_measurement_df(data_file_path)
Expand Down Expand Up @@ -95,6 +97,8 @@ def check_vis_spec_consistency(
group_by:
Specifies the grouping of data to plot.
"""
warnings.warn("This function will be removed in future releases. ",
DeprecationWarning)

# We have no vis_spec file. Check how data should be grouped
group_by = ''
Expand Down Expand Up @@ -187,6 +191,9 @@ def create_dataset_id_list(
For additional documentation, see main function plot_data_and_simulation()
"""
warnings.warn("This function will be removed in future releases. ",
DeprecationWarning)

# create a column of dummy datasetIDs and legend entries: preallocate
dataset_id_column = []
legend_dict = {}
Expand All @@ -197,7 +204,7 @@ def create_dataset_id_list(
tmp_obs = list(exp_data[OBSERVABLE_ID])
for ind, cond_id in enumerate(tmp_simcond):
# create and add dummy datasetID
dataset_id = tmp_simcond[ind] + '_' + tmp_obs[ind]
dataset_id = cond_id + '_' + tmp_obs[ind]
dataset_id_column.append(dataset_id)

# create nicer legend entries from condition names instead of IDs
Expand Down Expand Up @@ -272,6 +279,77 @@ def create_dataset_id_list(
return exp_data, dataset_id_list, legend_dict, yvalues_dict


def generate_dataset_id_col(exp_data: pd.DataFrame) -> List[str]:
    """
    Generate a dummy DATASET_ID column from condition and observable ids.

    For each measurement row, the generated entry is
    ``'<simulationConditionId>_<observableId>'``.

    Parameters:
        exp_data: Measurement DataFrame; must contain the columns
            SIMULATION_CONDITION_ID and OBSERVABLE_ID.

    Returns:
        A list with one dummy datasetId per row of ``exp_data``, in row
        order, suitable for insertion as a DATASET_ID column.
    """
    # Pair each row's condition id with its observable id instead of
    # indexing a parallel list by position.
    return [f'{cond_id}_{obs_id}'
            for cond_id, obs_id in zip(exp_data[SIMULATION_CONDITION_ID],
                                       exp_data[OBSERVABLE_ID])]


def create_dataset_id_list_new(df: pd.DataFrame,
                               group_by: str,
                               id_list: List[IdsList]
                               ) -> List[IdsList]:
    """
    Create a list of datasetIds for each plot.

    Parameters:
        df: Measurements or simulations DataFrame; must contain a
            DATASET_ID column.
        group_by: Defines the grouping of the data to plot: 'simulation'
            groups by simulation condition id, 'observable' by
            observable id.
        id_list: List of lists of grouping-column ids, one sublist per
            plot. If ``None`` and ``group_by == 'observable'``, defaults
            to one plot per observable.

    Returns:
        A list of lists of datasetIds, one sublist per plot.

    Raises:
        ValueError: If ``df`` lacks a DATASET_ID column or ``group_by``
            is not a supported grouping.
    """
    if DATASET_ID not in df.columns:
        raise ValueError(f'{DATASET_ID} column must be in exp_data DataFrame')

    if group_by == 'simulation':
        grouping_col = SIMULATION_CONDITION_ID
    elif group_by == 'observable':
        grouping_col = OBSERVABLE_ID
        if id_list is None:
            # Default case: no grouping specified, so plot all
            # observables, one observable per plot.
            unique_obs_list = df[OBSERVABLE_ID].unique()
            id_list = [[obs_id] for obs_id in unique_obs_list]
    else:
        # Fixed: previously a bare ``raise ValueError`` with no message.
        raise ValueError(f"Unsupported group_by value: {group_by!r}. "
                         f"Expected 'simulation' or 'observable'.")

    dataset_id_list = []
    for sublist in id_list:
        plot_id_list = []
        for group_id in sublist:
            # Collect all (unique) datasetIds belonging to this group.
            plot_id_list.extend(
                df[df[grouping_col] == group_id][DATASET_ID].unique())
        dataset_id_list.append(plot_id_list)
    return dataset_id_list


def create_figure(
uni_plot_ids: np.ndarray,
plots_to_file: bool) -> Tuple[plt.Figure,
Expand All @@ -292,6 +370,8 @@ def create_figure(
fig: Figure object of the created plot.
ax: Axis object of the created plot.
"""
warnings.warn("This function will be removed in future releases. ",
DeprecationWarning)

# Set Options for plots
# possible options: see: plt.rcParams.keys()
Expand Down Expand Up @@ -415,6 +495,8 @@ def get_vis_spec_dependent_columns_dict(
columns PLOT_ID, DATASET_ID, LEGEND_ENTRY, Y_VALUES for visualization
specification file.
"""
warnings.warn("This function will be removed in future releases. ",
DeprecationWarning)

# check consistency of settings
group_by = check_vis_spec_consistency(
Expand Down Expand Up @@ -533,6 +615,9 @@ def create_or_update_vis_spec(
A tuple of visualization specification DataFrame and experimental
DataFrame.
"""
warnings.warn("This function will be removed in future releases. ",
DeprecationWarning)

if vis_spec is None:
# create dataframe
exp_data, columns_dict = \
Expand Down Expand Up @@ -572,7 +657,9 @@ def create_or_update_vis_spec(
vis_spec = expand_vis_spec_settings(vis_spec, columns_dict)

# if dataset_id is there, then nothing to expand?
vis_spec[PLOT_TYPE_DATA] = plotted_noise

if PLOT_TYPE_DATA not in vis_spec.columns:
vis_spec[PLOT_TYPE_DATA] = plotted_noise

# check columns, and add non-mandatory default columns
vis_spec = check_ex_visu_columns(vis_spec, exp_data, exp_conditions)
Expand All @@ -589,6 +676,9 @@ def check_ex_visu_columns(vis_spec: pd.DataFrame,
Returns:
Updated visualization specification DataFrame
"""
warnings.warn("This function will be removed in future releases. ",
DeprecationWarning)

if PLOT_NAME not in vis_spec.columns:
vis_spec[PLOT_NAME] = ''
if PLOT_TYPE_SIMULATION not in vis_spec.columns:
Expand Down Expand Up @@ -658,6 +748,9 @@ def check_ex_exp_columns(
A tuple of experimental DataFrame, list of datasetIds and
dictionary of plot legends, corresponding to the datasetIds
"""
warnings.warn("This function will be removed in future releases. ",
DeprecationWarning)

data_type = MEASUREMENT
if sim:
data_type = SIMULATION
Expand Down Expand Up @@ -692,6 +785,7 @@ def check_ex_exp_columns(
value='')
legend_dict = {}
if DATASET_ID not in exp_data.columns:
# TODO: ?
if dataset_id_list is not None:
exp_data.insert(loc=4, column=DATASET_ID,
value=dataset_id_list)
Expand All @@ -712,6 +806,7 @@ def check_ex_exp_columns(
sim_cond_id_list, sim_cond_num_list, observable_id_list,
observable_num_list, exp_data, exp_conditions, group_by)

# if DATASET_ID is in exp_data.columns, legend dict will be empty
return exp_data, dataset_id_list, legend_dict


Expand All @@ -723,6 +818,9 @@ def handle_dataset_plot(plot_spec: pd.Series,
"""
Handle dataset plot
"""
warnings.warn("This function will be removed in future releases. ",
DeprecationWarning)

# get datasetID and independent variable of first entry of plot1
dataset_id = plot_spec[DATASET_ID]
indep_var = plot_spec[X_VALUES]
Expand Down Expand Up @@ -795,6 +893,9 @@ def matches_plot_spec(df: pd.DataFrame,
Boolean series that can be used for subsetting of the passed
dataframe
"""
warnings.warn("This function will be removed in future releases. ",
DeprecationWarning)

subset = (
(df[col_id] == x_value) &
(df[DATASET_ID] == plot_spec[DATASET_ID])
Expand Down Expand Up @@ -843,6 +944,8 @@ def get_data_to_plot(plot_spec: pd.Series,
Contains the data which should be plotted
(Mean and Std)
"""
warnings.warn("This function will be removed in future releases. ",
DeprecationWarning)

# create empty dataframe for means and SDs
data_to_plot = pd.DataFrame(
Expand Down
Loading

0 comments on commit 99b2d21

Please sign in to comment.