From 767581069f0da64cfeef325f6589876cb4cca6d3 Mon Sep 17 00:00:00 2001
From: mariesieger
Date: Fri, 15 Nov 2024 15:35:36 +0100
Subject: [PATCH] Add Mishra_model README.md

---
 .../Mishra_MetabEng2023/README-petab-files.md | 40 +++++++++++++------
 1 file changed, 28 insertions(+), 12 deletions(-)

diff --git a/Benchmark-Models/Mishra_MetabEng2023/README-petab-files.md b/Benchmark-Models/Mishra_MetabEng2023/README-petab-files.md
index 48b8b0f..f365775 100644
--- a/Benchmark-Models/Mishra_MetabEng2023/README-petab-files.md
+++ b/Benchmark-Models/Mishra_MetabEng2023/README-petab-files.md
@@ -4,9 +4,15 @@
 import pandas as pd
 import matplotlib.pyplot as plt
 from matplotlib import cm
+# `colormaps[...]` (used below) is the modern replacement for cm.get_cmap
+from matplotlib import colormaps
+from sklearn.metrics import r2_score, mean_squared_error
+
+
 def plot_measurements_vs_simulation(simulation_df, measurement_df, output_file=None):
     """
-    Generates a logarithmic scatter plot comparing measurements and simulations.
+    Generates a logarithmic scatter plot comparing measurements and simulations,
+    with R² and MSE displayed on the plot.
 
     Parameters:
     simulation_df (pd.DataFrame): DataFrame containing simulation data.
@@ -28,33 +34,43 @@ def plot_measurements_vs_simulation(simulation_df, measurement_df, output_file=N
 
     # Map unique colors to each datasetId
     unique_datasets = df_merged['datasetId'].unique()
-    colors = cm.get_cmap('tab10', len(unique_datasets))  # Using 'tab10' color map
-
-    # Initialize the plot
+    colors = colormaps['tab10']  # Use the updated colormap API
+    color_list = [colors(i) for i in range(len(unique_datasets))]
+
+    # Plotting
     plt.figure(figsize=(8, 8))
     plt.xscale('log')
     plt.yscale('log')
-
-    # Scatter plot with color mapping for each datasetId
+
     for i, dataset in enumerate(unique_datasets):
         subset = df_merged[df_merged['datasetId'] == dataset]
-        plt.scatter(subset['measurement'], subset['simulation'], color=colors(i), label=dataset)
-
-    # Add bisectrix (y=x line)
+        plt.scatter(subset['measurement'], subset['simulation'], color=color_list[i], label=dataset)
+
+    # Calculate R² and MSE
+    r2 = r2_score(df_merged['measurement'], df_merged['simulation'])
+    mse = mean_squared_error(df_merged['measurement'], df_merged['simulation'])
+
+    # Add R² and MSE to the plot
     x_min, x_max = df_merged['measurement'].min(), df_merged['measurement'].max()
     plt.plot([x_min, x_max], [x_min, x_max], 'k--')
-
-    # Labels, title, and legend
+    plt.text(
+        x_min * 1.2, x_max / 2,
+        f"$R^2$: {r2:.3f}\nMSE: {mse:.3e}",
+        fontsize=10, bbox=dict(facecolor='white', alpha=0.8, edgecolor='gray')
+    )
+
+    # Add labels and legend
     plt.xlabel("Measurement (log scale)")
     plt.ylabel("Simulation (log scale)")
-    plt.title("Logarithmic Measurement vs Simulation by Dataset")
     plt.legend(title="Dataset ID")
+    # NOTE: no plt.show() here — showing before savefig would emit a blank file
 
     # Show or save the plot
     if output_file:
         plt.savefig(output_file, dpi=300, bbox_inches='tight')
     else:
         plt.show()
 
 
+# Assuming `simulation_df` and `measurement_df` are predefined dataframes
 plot_measurements_vs_simulation(simulation_df, measurement_df)