diff --git a/.cspell.json b/.cspell.json index ae092f87..3674d17a 100644 --- a/.cspell.json +++ b/.cspell.json @@ -62,6 +62,7 @@ "blatt", "breit", "chromodynamics", + "Clebsch", "compwa", "conda", "Curvenote", @@ -70,9 +71,12 @@ "defaultdict", "dummifies", "dummify", + "eigenstates", "eval", "flatté", "functools", + "GlueX", + "Gordan", "Hankel", "helicities", "helicity", @@ -203,6 +207,7 @@ "epsabs", "epsrel", "eqnarray", + "errordef", "evaluatable", "expertsystem", "facecolor", @@ -231,7 +236,9 @@ "hoverinfo", "hspace", "hypotests", + "iframe", "imag", + "iminuit", "infty", "ioff", "iplt", @@ -278,6 +285,7 @@ "maxdepth", "maxsize", "meshgrid", + "migrad", "mmikhasenko", "mname", "mplot3d", @@ -291,6 +299,7 @@ "nbconvert", "nbformat", "nbmake", + "nbody", "nbsp", "ncols", "ndarray", diff --git a/docs/conf.py b/docs/conf.py index 8b3da5f3..52bd1ddf 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -44,6 +44,7 @@ def get_nb_exclusion_patterns() -> list[str]: "report/020*", "report/021*", "report/022*", + "report/033*", } julia_notebooks = { "report/019*", @@ -218,12 +219,12 @@ def install_ijulia() -> None: "https://atom.io", # often instable "https://doi.org/10.1002/andp.19955070504", # 403 for onlinelibrary.wiley.com "https://doi.org/10.1155/2020/6674595", # 403 hindawi.com + "https://doi.org/10.7566/JPSCP.26.022002", # 403 for journals.jps.jp "https://downloads.hindawi.com", # 403 "https://github.com/organizations/ComPWA/settings/repository-defaults", # private - "https://github.com/orgs/ComPWA/projects/5", # private - "https://github.com/orgs/ComPWA/projects/6", # private "https://ieeexplore.ieee.org/document/6312940", # 401 "https://indico.ific.uv.es/event/6803", # SSL error + "https://journals.aps.org", "https://leetcode.com", "https://mybinder.org", # often instable "https://open.vscode.dev", @@ -232,6 +233,8 @@ def install_ijulia() -> None: "https://via.placeholder.com", # irregular timeout "https://www.andiamo.co.uk/resources/iso-language-codes", # 443, but 
works "https://www.bookfinder.com", + r"https://github.com/ComPWA/RUB-EP1-AG/.*", # private + r"https://github.com/orgs/ComPWA/projects/\d+", # private ] myst_enable_extensions = [ "amsmath", diff --git a/docs/develop.md b/docs/develop.md index 70d8b2d9..de693421 100644 --- a/docs/develop.md +++ b/docs/develop.md @@ -636,12 +636,7 @@ EXECUTE_NB= tox -e doclive ``` :::{tip} -Notebooks are automatically formatted through {ref}`pre-commit ` -(see {ref}`develop:Formatting`). If you want to format the notebooks automatically as -you're working, you can do so with -[`jupyterlab-code-formatter`](https://ryantam626.github.io/jupyterlab_code_formatter/index.html), -which is automatically -{ref}`installed with the dev requirements `. +Notebooks are automatically formatted through {ref}`pre-commit ` (see {ref}`develop:Formatting`). If you want to format the notebooks automatically as you're working, you can do so with [`jupyterlab-code-formatter`](https://jupyterlab-code-formatter.readthedocs.io), which is automatically {ref}`installed with the dev requirements `. ::: #### IJulia notebooks diff --git a/docs/report/033.ipynb b/docs/report/033.ipynb new file mode 100644 index 00000000..1a13d126 --- /dev/null +++ b/docs/report/033.ipynb @@ -0,0 +1,3685 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "source": [ + "```{autolink-concat}\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "3d", + "kinematics", + "physics", + "tutorial" + ] + }, + "source": [ + "::::{margin}\n", + ":::{card} PWA101: Amplitude analysis with Python basics\n", + "TR-033\n", + "^^^\n", + "This document introduces amplitude analysis, and specifically the technique called Partial Wave Analysis (PWA), by demonstrating its application to a specific reaction channel and amplitude model. The tutorial uses basic Python programming and libraries (e.g. 
[`numpy`](https://numpy.org/doc/stable), [`scipy`](https://docs.scipy.org/doc/scipy), etc.) are used to illustrate the more fundamental steps of PWA in hadron physics.\n", + "+++\n", + "✅ [ComPWA/RUB-EP1-AG#93](https://github.com/ComPWA/RUB-EP1-AG/issues/93), [compwa.github.io#217](https://github.com/ComPWA/compwa.github.io/pull/217)\n", + ":::\n", + "::::" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "# Amplitude Analysis 101 (PWA 101)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "outputs": [], + "source": [ + "%pip install -q iminuit==2.26.0 matplotlib==3.9.1 numpy==1.26.4 pandas==2.2.2 particle==0.24.0 phasespace==1.10.3 scipy==1.14.0 tqdm==4.66.4 vector==1.4.1" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + ":::{admonition} Abstract\n", + "This document introduces Amplitude Analysis / Partial Wave Analysis (PWA) by demonstrating its application to a specific reaction channel and amplitude model. It aims to equip readers with a basic understanding of the full workflow and methodologies of PWA in hadron physics through a practical, hands-on example. Only basic Python programming and libraries (e.g. [`numpy`](https://numpy.org/doc/stable), [`scipy`](https://docs.scipy.org/doc/scipy), etc.) are used to illustrate the more fundamental steps in a PWA. Calculations with 4-vectors in this report are performed with the [`vector`](https://vector.readthedocs.io/en/latest/usage/intro.html) package.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + ":::{seealso}\n", + "A follow-up tutorial, [PWA101 v2.0](https://compwa.github.io/gluex-nstar), is being prepared in [ComPWA/gluex-nstar#13](https://github.com/ComPWA/gluex-nstar/pull/13). 
Whereas this report focuses on common, numerical Python libraries, v2.0 formulates the amplitude model with a {doc}`symbolic approach`.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When performing an amplitude analysis, one usually starts with a long process of selecting data ('events') from a particle collider experiment for specific decay channels. It is a delicate process with the intent to reject as much background noise from other decay channels as possible, while maintaining sufficient signal events so that an amplitude model can be 'fit' to it. This tutorial will not consider this event selection, but will focus on amplitude model formulation and fitting it to a data sample that is assumed to come from experiment. As such, the tutorial is built up of the following main steps:\n", + "\n", + "1. [Formulate amplitude model](#amplitude-model).\n", + "2. [Visualize and inspect model](#visualization).\n", + "3. [Generate toy data](#generate-data).\n", + "4. [Fit model to data distribution](#fit-model).\n", + "\n", + "The following Python packages are all that we require to go through each of the steps." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "mystnb": { + "code_prompt_show": "Hide warnings" + }, + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "import logging\n", + "import os\n", + "import warnings\n", + "\n", + "os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n", + "logging.disable(logging.WARNING)\n", + "warnings.filterwarnings(\"ignore\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "mystnb": { + "code_prompt_show": "Import Python libraries" + }, + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "import matplotlib as mpl\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import pandas as pd\n", + "import phasespace\n", + "import scipy as sp\n", + "import tensorflow as tf\n", + "import vector\n", + "from iminuit import Minuit\n", + "from matplotlib import gridspec\n", + "from tqdm.auto import tqdm\n", + "from vector.backends.numpy import MomentumNumpy4D" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "(amplitude-model)=\n", + "## Amplitude model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "### Theory" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "\n", + "This amplitude model is adapted from the [Lecture 11 in STRONG2020 HaSP School](https://indico.ific.uv.es/event/6803/contributions/21223/) by Vincent Mathieu.\n", + "\n", + "The reaction $ \\gamma p \\to \\eta \\pi^0 p$ is one of the reaction channels that are studied in photo-production experiments like [GlueX](http://www.gluex.org). For simplicity, we assume that the decay proceeds through three possible resonances—$a_2$, $\\Delta^+$, and $N^*$—with each in a different subsystem." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "full-width" + ] + }, + "source": [ + "```{image} https://github.com/ComPWA/compwa-org/assets/17490173/ec6bf191-bd5f-43b0-a6cb-da470b071630\n", + ":width: 100%\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "In the following, we denote $1 \\equiv \\eta, 2 \\equiv \\pi^0, 3 \\equiv p$. Given these three subsystems in this particle transition, we can construct three amplitudes," + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "$$\n", + "\\begin{eqnarray}\n", + "A^{12} &=& \\frac{\\sum a_m Y_2^m (\\Omega_1)}{s_{12}-m^2_{a_2}+im_{a_2} \\Gamma_{a_2}} \\times s_{12}^{0.5+0.9u_3} \\,, \\nonumber \\\\\n", + "A^{23} &=& \\frac{\\sum b_m Y_1^m (\\Omega_2)}{s_{23}-m^2_{\\Delta}+im_{\\Delta} \\Gamma_{\\Delta}} \\times s_{23}^{0.5+0.9t_1} \\,, \\nonumber \\\\\n", + "A^{31} &=& \\frac{c_0}{s_{31}-m^2_{N^*}+im_{N^*} \\Gamma_{N^*}} \\times s_{31}^{1.08+0.2t_2} \\,,\n", + "\\end{eqnarray}\n", + "$$ (full-model-with-exponential)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "where $s, t, u$ are the Mandelstam variables ($s_{ij}=(p_i+p_j)^2$, $t_i=(p_a-p_i)^2$, and $u_i=(p_b-p_i)^2$), $m$ is the resonance mass, $\\Gamma$ is the resonance width, $Y^m_l$ are spherical harmonics functions, $\\Omega_i$ are pairs of Euler angles (polar angle $\\theta$ and azimuthal angle $\\phi$) that represent the helicity decay angles, and $a_i$, $b_i$, and $c_i$ are complex-valued coefficients. Note that the Mandelstam variables and angles come from measured events, while the other symbols are parameters that need to be modified in order to have the amplitude model match the data." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{note}\n", + "All that an amplitude model requires as data input, are **four-momenta**. 
For this amplitude model, there are just three required four-momenta per event, one for each final state in $p\\gamma \\to \\eta\\pi^0 p$.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "The original full amplitude model from the [Lecture 11 in STRONG2020 HaSP School](https://indico.ific.uv.es/event/6803/contributions/21223/) is shown in Equation {eq}`full-model-with-exponential`.\n", + "*In this report, only the Breit–Wigner and Spherical harmonics terms are kept, while the exponential factor is abandoned, i.e.*" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "$$\n", + "\\begin{eqnarray}\n", + "A^{12} &=& \\frac{\\sum a_m Y_2^m (\\Omega_1)}{s_{12}-m^2_{a_2}+im_{a_2} \\Gamma_{a_2}} \\,, \\\\\n", + "A^{23} &=& \\frac{\\sum b_m Y_1^m (\\Omega_2)}{s_{23}-m^2_{\\Delta}+im_{\\Delta} \\Gamma_{\\Delta}} \\,, \\\\\n", + "A^{31} &=& \\frac{c_0}{s_{31}-m^2_{N^*}+im_{N^*} \\Gamma_{N^*}} \\,.\n", + "\\end{eqnarray}\n", + "$$ (model-dynamics-and-angular)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "The intensity $I$ that describes our measured distributions is then expressed as a coherent sum of the amplitudes $A^{ij}$,\n", + "\n", + "$$\n", + "\\begin{eqnarray}\n", + "I &=& |A^{12} + A^{23} + A^{31}|^2 \\,.\n", + "\\end{eqnarray}\n", + "$$ (intensity-coherent-sum)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "Ultimately, the amplitude model in Equations {eq}`model-dynamics-and-angular` and {eq}`intensity-coherent-sum` in this tutorial consists of three resonances, and each of them are formed by two components: a Breit-Wigner times some spherical harmonics ($l = 2, 1, 0$)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + ":::{admonition} Assumption: spinless final state!\n", + ":class: dropdown\n", + "While the **spin** of the $\\eta$ meson and the $\\pi^0$ meson are both $0$, the spin of the proton is spin-$\\frac{1}{2}$.\n", + "However, in order to simplify the amplitude model, we treat the proton as a spin-0 particle.\n", + "Overall, the $\\eta$, $\\pi^0$ and $p$ are therefore all treated as spin-0 particles.\n", + "\n", + "The primary motivation for assuming proton to be spin-$0$ is to avoid the necessity of aligning the amplitudes (see e.g. [ComPWA/ampform#6](https://github.com/ComPWA/ampform/issues/6)).\n", + "This assumption enables the intensity $I$ to be written as a coherent sum of the amplitudes of the subsystems without the need for additional Wigner rotations.\n", + "In addition, the amplitude for each decay chain contains only one spherical harmonic, namely that of the resonance.\n", + "\n", + "The spherical harmonics in Equation {eq}`model-dynamics-and-angular` are therefore relevant only to the resonances. \n", + "Here, $l$ represents the spin of the resonances and $m$ represents its spin projection in the decay chain.\n", + "The total angular momentum and coupled spin (for the two-body state of the two decay products of the resonance) are not considered.\n", + "According to {cite}`Richman:1984gh` and other classical references on helicity, this is known as the **helicity basis**.\n", + "In contrast, the **canonical basis** does not sum over $L$ and $S$, resulting in a more complex coherent sum of amplitudes. \n", + "The transformation between these bases is also discussed [here](https://ampform.rtfd.io/0.15.x/usage/helicity/formalism.html) on the AmpForm website.\n", + "\n", + "In our case: \n", + "- $A^{12}$ represents a **d-wave** interaction, because we assume there is a $a_2$ resonance (spin 2) in this subsystem. The possible $m$ values are $−2,−1,0,1,2$. 
Each of these values corresponds to different orientations of the d-wave.\n", + "- $A^{23}$ represents a **p-wave** interaction, because we assume this subsystem has a (spin-1) $\\Delta^+$ resonance. The possible $m$ values are $−1,0,1$.\n", + "- $A^{31}$ represents an **s-wave** interaction, because we assume there is one spin-0 $N^*$ resonance in this subsystem. The only possible $m$ value is 0 and, since $Y_0^0$ is a constant, the amplitude only consists of a Breit–Wigner.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "### Implementation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the following, we define a few functions that implement parts of the amplitude model of Equation {eq}`model-dynamics-and-angular`. Later on in [](#visualization), we can use these functions to visualize each component of the model individually." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "(breit-wigner-model)=\n", + "#### Breit-Wigner (only) Model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "full-width" + ] + }, + "source": [ + "The following functions define the Breit–Wigner function, as well as the following intensity function that only contains the Breit–Wigner (dynamics) component of each amplitude.\n", + "\n", + "$$\n", + "\\begin{array}{rcl}\n", + "I &= &\n", + "\\left\\lvert \\frac{1}{s-m^2_{a_2}+im_{a_2} \\Gamma_{a_2}} +\n", + "\\frac{1}{s-m^2_{\\Delta}+im_{\\Delta} \\Gamma_{\\Delta}} +\n", + "\\frac{1}{s-m^2_{N^*}+im_{N^*} \\Gamma_{N^*}} \\right\\rvert ^2\n", + "\\end{array}\n", + "$$ (model-dynamics-only)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def BW_model(s12, s23, s31, *, M12, Gamma12, M23, Gamma23, M31, Gamma31, **kwargs):\n", + " A12 = BW(s12, M12, Gamma12)\n", + " 
A23 = BW(s23, M23, Gamma23)\n", + " A31 = BW(s31, M31, Gamma31)\n", + " return np.abs(A12 + A23 + A31) ** 2\n", + "\n", + "\n", + "def BW(s, m, Gamma):\n", + " return 1 / (s - m**2 + m * Gamma * 1j)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "(spherical-harmonics-model)=\n", + "#### Spherical Harmonics (only) Model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "The calculation of $Y_l^m(\\phi, \\theta)$ is done via [`scipy.special.sph_harm()`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.sph_harm.html). However, we use a different definition of $\\phi$ and $\\theta$, following a notation that is more common in hadron physics.\n", + "- $\\phi$ is the **azimuthal angle** and ranges from -$\\pi$ to $\\pi$. SciPy represents this as $\\theta$, ranging from $0$ to $2\\pi$.\n", + "- $\\theta$ is the **polar angle** and ranges from $0$ to $\\pi$. 
SciPy represents this as $\\phi$ with the same range.\n", + "\n", + "This leads to\n", + "\n", + "$$\n", + "Y_\\ell^m(\\phi, \\theta) = \\sqrt{\\frac{2n+1}{4\\pi}\\frac{(n-m)!}{(n+m)!}}e^{im\\phi}P_\\ell^m\\left(\\cos\\theta\\right) \\,.\n", + "$$ (spherical-harmonics-definition)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + ":::{tip}\n", + "Alternatively, we can formulate the spherical harmonics in terms of a Wigner-$d$ or Wigner-$D$ function, as\n", + "\n", + "$$\n", + "\\begin{eqnarray}\n", + "Y_\\ell^m(\\theta,\\phi) = \\sqrt{\\frac{2\\ell+1}{4\\pi}}D^\\ell_{m0}(-\\phi, \\theta, 0) = \\sqrt{\\frac{2\\ell+1}{4\\pi}}e^{im\\phi} d^\\ell_{m0}(\\theta) \\,.\n", + "\\end{eqnarray}\n", + "$$ (spherical-harmonics-wigner-d)\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "In the following, we define functions to compute the spherical harmonics of the three subsystem amplitudes in Equation {eq}`model-dynamics-and-angular`. Note how the function signature consists of two input data columns, `theta` and `phi`, and how the rest of the arguments are parameters. The final `kwargs` (key-word arguments) is there so that we can compose larger functions from these function definitions." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def Ylm12(\n", + " theta: np.ndarray,\n", + " phi: np.ndarray,\n", + " *,\n", + " a_minus2,\n", + " a_minus1,\n", + " a_0,\n", + " a_plus1,\n", + " a_plus2,\n", + " **kwargs,\n", + ") -> np.ndarray:\n", + " return (\n", + " a_plus2 * sp.special.sph_harm(2, 2, phi, theta)\n", + " + a_plus1 * sp.special.sph_harm(1, 2, phi, theta)\n", + " + a_0 * sp.special.sph_harm(0, 2, phi, theta)\n", + " + a_minus1 * sp.special.sph_harm(-1, 2, phi, theta)\n", + " + a_minus2 * sp.special.sph_harm(-2, 2, phi, theta)\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def Ylm23(\n", + " theta: np.ndarray, phi: np.ndarray, *, b_minus1, b_0, b_plus1, **kwargs\n", + ") -> np.ndarray:\n", + " return (\n", + " b_plus1 * sp.special.sph_harm(1, 1, phi, theta)\n", + " + b_0 * sp.special.sph_harm(0, 1, phi, theta)\n", + " + b_minus1 * sp.special.sph_harm(-1, 1, phi, theta)\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def Ylm31(theta: np.ndarray, phi: np.ndarray, c_0: complex, **kwargs) -> np.ndarray:\n", + " return c_0 * sp.special.sph_harm(0, 0, phi, theta)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "full-width" + ] + }, + "source": [ + "We now have the ingredients to define an intensity function that only contains spherical harmonics, that is, the angular part of the amplitude model:\n", + "\n", + "$$\n", + "\\begin{array}{rcl}\n", + "I &=&\n", + "\\left\\lvert \\sum a_m Y_2^m (\\Omega_1) +\n", + "\\sum b_m Y_1^m (\\Omega_2) +\n", + "c_0 \\right\\rvert ^2 \\,.\n", + "\\end{array}\n", + "$$ (model-angular-only)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def 
SH_model(phi1, theta1, phi2, theta2, *, c_0, **pars):\n", + " return np.abs(Ylm12(phi1, theta1, **pars) + Ylm23(phi2, theta2, **pars) + c_0) ** 2" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "#### Breit-Wigner $\\times$ Spherical Harmonics Model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "full-width" + ] + }, + "source": [ + "Finally, we combine the [](#breit-wigner-model) and [](#spherical-harmonics-model) to get an implementation for Equation {eq}`model-dynamics-and-angular`,\n", + "\n", + "$$\n", + "\\begin{array}{rcl}\n", + "I &=& \\left\\lvert A^{12}+A^{23}+A^{31} \\right\\rvert ^2 \\nonumber \\\\\n", + "&=&\n", + "\\left\\lvert \\frac{\\sum a_m Y_2^m (\\Omega_1)}{s-m^2_{a_2}+im_{a_2} \\Gamma_{a_2}} +\n", + "\\frac{\\sum b_m Y_1^m (\\Omega_2)}{s-m^2_{\\Delta}+im_{\\Delta} \\Gamma_{\\Delta}} +\n", + "\\frac{c_0}{s-m^2_{N^*}+im_{N^*} \\Gamma_{N^*}} \\right\\rvert ^2 \\,.\n", + "\\end{array}\n", + "$$" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def BW_SH_model(\n", + " s12,\n", + " s23,\n", + " s31,\n", + " phi1,\n", + " theta1,\n", + " phi2,\n", + " theta2,\n", + " *,\n", + " M12,\n", + " Gamma12,\n", + " M23,\n", + " Gamma23,\n", + " M31,\n", + " Gamma31,\n", + " c_0,\n", + " **parameters,\n", + "):\n", + " A12 = BW(s12, M12, Gamma12) * Ylm12(phi1, theta1, **parameters)\n", + " A23 = BW(s23, M23, Gamma23) * Ylm23(phi2, theta2, **parameters)\n", + " A31 = BW(s31, M31, Gamma31) * c_0\n", + " return np.abs(A12 + A23 + A31) ** 2" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "## Visualization" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As discussed in [](#theory), the amplitude model contains variables that are provided by experiment (\"events\" as data 
input) as well as parameters that are to be optimized. The data input is usually in the form of **four-momenta**, while the model is formulated in the form of Mandelstam variables and helicity angles. We therefore have to compute these variables from the four-momenta." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "### Phase space generation" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "First, however, we need to generate a phase space sample of four-momenta in order to plot the amplitude model as a distribution over each of the variables. In this section, we use the [`phasespace`](https://github.com/zfit/phasespace) package for generating four-momenta for the reaction $p\\gamma \\to p\\eta\\pi^0$. The phase space sample will also be used later on to normalize the model when calculating the likelihood over the data sample (see [](#fit-model))." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "#### Lab frame and CM frame" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "In Center-of-Mass (CM) frame, the 4-momentum of the total system can be acquired by 4-momentum conservation:" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "$$\\begin{pmatrix}\n", + " E_0 \\\\\n", + " 0 \\\\\n", + " 0 \\\\\n", + " 0\n", + "\\end{pmatrix}\n", + "= \\begin{pmatrix}\n", + " E_{\\gamma} \\\\\n", + " 0 \\\\\n", + " 0 \\\\\n", + " p_z\n", + "\\end{pmatrix} +\n", + "\\begin{pmatrix}\n", + " E_p \\\\\n", + " 0 \\\\\n", + " 0 \\\\\n", + " -p_z\n", + "\\end{pmatrix}\n", + "= \\begin{pmatrix}\n", + " E_{\\eta} \\\\\n", + " p_{\\eta,x} \\\\\n", + " p_{\\eta,y} \\\\\n", + " p_{\\eta,z}\n", + "\\end{pmatrix} +\n", + "\\begin{pmatrix}\n", + " E_{\\pi} \\\\\n", + " p_{\\pi,x} \\\\\n", + " p_{\\pi,y} \\\\\n", 
+ " p_{\\pi,z}\n", + "\\end{pmatrix} +\n", + "\\begin{pmatrix}\n", + " E_p \\\\\n", + " p_{p,x} \\\\\n", + " p_{p,y} \\\\\n", + " p_{p,z}\n", + "\\end{pmatrix}\n", + "$$ (four-momentum-conservation)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "source": [ + "From this we can see that the system depends mainly on beam momentum $p_z$ and CM total energy $E_0$." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + ":::{caution}\n", + "The calculation here involves using values from the CM frame. While this frame is commonly used for theoretical calculations, experimental data is often analyzed in the lab frame. However, it's worth noting that in some collider experiments that do not have a fixed target, the CM frame can coincide with the lab frame.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "The [GlueX](http://www.gluex.org/) experiment at Jefferson Lab uses a fixed proton target with a linearly polarized photon beam, and the beam energy range in the lab frame is typically from [**8 to 9 GeV**](https://doi.org/10.7566/JPSCP.26.022002)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "We use the $\\gamma$ beam energy in the lab frame as input, e.g. $E_{\\gamma, \\text{lab}} = 8.5 \\; \\text{GeV}$, and want to know the collision energy in the CM frame.\n", + "\n", + "We can calculate the energy of the photon in the CM frame as follows. 
The four-momentum of photon (beam) in the lab frame is\n", + "\n", + "$$\n", + "p_{\\gamma,\\text{lab}} = \\left(E_{\\gamma, \\text{lab}}, \\vec{p}_{\\gamma,\\text{lab}}\\right) \\,.\n", + "$$ (beam-four-momentum)\n", + "\n", + "Since the photon is massless, $m_\\gamma=0$, and\n", + "\n", + "$$\n", + "m^2 = E^2 - \\left|\\vec{p}\\right|^2 \\,,\n", + "$$ (invariant-mass-definition)\n", + "\n", + "we get\n", + "\n", + "$$\n", + "E_{\\gamma} = |\\vec{p_{\\gamma}}| .\n", + "$$ (gamma-energy)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "The proton (target) is at rest in the lab frame, so we have\n", + "\n", + "$$\n", + "p_{p,\\text{lab}} = (m_p, \\vec{0})\\,,\n", + "$$ (four-momentum-target)\n", + "\n", + "where $m_p$ is the proton mass. We have a total four-momentum in the lab frame of\n", + "\n", + "$$\n", + "p_{\\text{tot},\\text{lab}} = p_{\\gamma,\\text{lab}} + p_{p,\\text{lab}}\n", + "$$ (total-four-momentum-lab)\n", + "\n", + "and we know\n", + "\n", + "$$\n", + "E_p = \\sqrt{p_p^2+ m_p^2} \\,.\n", + "$$ (four-momentum-proton)\n", + "\n", + "The CM total energy $E_0$ expressed in terms of quantities from the **lab frame** is thus\n", + "\n", + "$$\n", + "m_0 \\equiv E_0 = \\sqrt{2 E_{\\gamma,\\text{lab}} m_p + m_p^2} \\,.\n", + "$$ (total-energy-cm)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "Equivalently, from the **CM frame** perspective, since $\\vec{p}_{\\gamma} = -\\vec{p}_{p}$ and $|\\vec{p}_{\\gamma}| = |\\vec{p}_{p}|= p_{z}$, we find\n", + "\n", + "$$\n", + "E_0 = m_0 = |p_{z}|+\\sqrt{p_{z}^2 + m_p^2} \\,.\n", + "$$ (total_energy_CM_frame_still)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "source": [ + "Therefore, the energy of the photon ($E_{\\gamma}$) and its momentum ($p_{\\gamma}$) in the **CM frame** are:\n", + "\n", + "$$\n", + "E_{\\gamma} = |\\vec{p}_{\\gamma}| = p_{z} = 1.944 \\; \\text{GeV} \\,.\n", 
+ "$$" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "Our implementation based on Equation {eq}`total-energy-cm` thus becomes:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "E_lab_gamma = 8.5\n", + "m_proton = 0.938\n", + "m_eta = 0.548\n", + "m_pi = 0.135\n", + "m_0 = np.sqrt(2 * E_lab_gamma * m_proton + m_proton**2)\n", + "m_0" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "tags": [ + "remove-cell" + ] + }, + "outputs": [], + "source": [ + "p_beam = 1.943718\n", + "np.testing.assert_almost_equal(\n", + " p_beam + np.sqrt(p_beam**2 + m_proton**2), m_0, decimal=6\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "Thus, we then have the mass of the system $m_0$ (or the mass of a 'virtual' particle $p\\gamma$) in CM frame of\n", + "\n", + "$$\n", + "E_{0} = m_0\\approx 4.102 \\;\\; \\text{GeV} \\,.\n", + "$$" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "#### Final state four-momenta" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "The [`phasespace`](https://github.com/zfit/phasespace) library is a Python package designed to simulate particle decays according to the principles of relativistic kinematics and phase space distributions. We first use the [`phasespace`](https://github.com/zfit/phasespace) to generate decay particles." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "phsp_events = 500_000\n", + "weights, particles = phasespace.nbody_decay(m_0, [m_eta, m_pi, m_proton]).generate(\n", + " n_events=phsp_events\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "Note that each generated event comes with a statistical weight. These weights represent how likely each particular event configuration (set of momenta and energies) is, based on phase space considerations. In order to generate a flat distribution, we will have to use a hit-and-miss method over these weights." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "mystnb": { + "code_prompt_show": "(Click to show) These are some functions that we defined for a structured process for generating phase space samples of particle decays and converting them into four-momentum vectors using TensorFlow and the phasespace Python package." 
+ }, + "tags": [ + "scroll-input" + ] + }, + "outputs": [], + "source": [ + "def generate_phsp_decay(\n", + " size: int, seed: int | None = None, bunch_size: int = 500_000\n", + ") -> tuple[MomentumNumpy4D, MomentumNumpy4D, MomentumNumpy4D]:\n", + " rng = np.random.default_rng(seed=seed)\n", + " generated_seed = rng.integers(1_000_000)\n", + " phsp_sample = generate_phsp_bunch(bunch_size, generated_seed)\n", + " while get_size(phsp_sample) < size:\n", + " bunch = generate_phsp_bunch(bunch_size, generated_seed)\n", + " phsp_sample = concatenate(phsp_sample, bunch)\n", + " phsp_sample = remove_overflow(phsp_sample, size)\n", + " return tuple(to_vector(tensor) for tensor in phsp_sample)\n", + "\n", + "\n", + "def generate_phsp_bunch(\n", + " size: int, seed: int | None = None\n", + ") -> tuple[tf.Tensor, tf.Tensor, tf.Tensor]:\n", + " rng = np.random.default_rng(seed=seed)\n", + " final_state = [m_eta, m_pi, m_proton]\n", + " weights, particles = phasespace.nbody_decay(m_0, final_state).generate(\n", + " n_events=size, seed=seed\n", + " )\n", + " random_weights = rng.uniform(0, weights.numpy().max(), size=weights.shape)\n", + " selector = weights > random_weights\n", + " return tuple(particles[f\"p_{i}\"][selector] for i in range(len(final_state)))\n", + "\n", + "\n", + "def get_size(phsp: tuple[tf.Tensor, tf.Tensor, tf.Tensor]):\n", + " return len(phsp[0])\n", + "\n", + "\n", + "def concatenate(\n", + " phsp1: tuple[tf.Tensor, tf.Tensor, tf.Tensor],\n", + " phsp2: tuple[tf.Tensor, tf.Tensor, tf.Tensor],\n", + ") -> tuple[tf.Tensor, tf.Tensor, tf.Tensor]:\n", + " return tuple(tf.concat([phsp1[i], phsp2[i]], axis=0) for i in range(3))\n", + "\n", + "\n", + "def remove_overflow(phsp: tuple[tf.Tensor, tf.Tensor, tf.Tensor], size: int):\n", + " return tuple(tensor[:size] for tensor in phsp)\n", + "\n", + "\n", + "def to_vector(tensor: tf.Tensor) -> MomentumNumpy4D:\n", + " return vector.array({\n", + " key: tensor.numpy().T[j] for j, key in enumerate([\"px\", \"py\", 
As mentioned, the `phasespace` package generates four-momenta that are not evenly distributed. The distributions appear even only when multiplied by the generated weights.
If this initial sample does not meet the required size, more samples are generated and concatenated until the requested size is reached.\n", + " - Once the target sample size is reached or exceeded, `remove_overflow()` trims the sample to precisely match the requested size.\n", + "\n", + "- **Step 3**, Convert tensors to [four-momentum vectors](https://vector.readthedocs.io/en/latest/api/backends/vector.backends.numpy.html#vector.backends.numpy.MomentumNumpy4D) objects\n", + " The `phasespace` package generates four-momenta in the form of $4\\times N$ TensorFlow tensors, with $N$ the number of events. For each tensor in the phase space tuple (representing the momentum components like $E, p_x, p_y, p_z$), the `to_vector` function converts them into a structured `MomentumNumpy4D` array. This structured array includes keys for each momentum component and possibly the energy component and provides convenient methods for computing boosts etc.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "#### Initial state four-momenta" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "Because of the simple relation in Equation {eq}`four-momentum-conservation`, the four-momenta for the initial state, $\\gamma$ (a) and $p$ (b), do not have to be generated by a phase space generator." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "To find $p_a$ and $p_b$ by 4-momentum conservation, we can use\n", + "\n", + "$$\n", + "\\begin{array}{rcl}\n", + "m_a &=& m_{\\gamma} =0 \\\\\n", + "m_b &=& m_p \\\\\n", + "p_{a,x} &=& p_{a,y} = 0 = p_{b,x} = p_{b,y} \\\\\n", + "p_{a,z} &=& - p_{b,z} \\,.\n", + "\\end{array}\n", + "$$\n", + "\n", + "Due to energy conservation, we have\n", + "\n", + "$$\n", + "\\begin{array}{rcl}\n", + "E_a + E_b &=& E_1 + E_2 + E_3 = E_0 \\\\\n", + "E_a + E_b &=& \\sqrt{m_a^2 + p_a^2} + \\sqrt{m_b^2 + p_b^2} = E_0 \\,.\n", + "\\end{array}\n", + "$$\n", + "\n", + "Since $m_a=0$ and $p_a = -p_b$, we have\n", + "\n", + "$$\n", + "p_a + \\sqrt{m_b^2 + (-p_a)^2} = E_0 \\,.\n", + "$$\n", + "\n", + "Reorganizing, we get,\n", + "\n", + "$$\n", + "p_{a,z} + \\sqrt{m_p^2 + p_a^2} - E_0 = 0\n", + "$$\n", + "\n", + "and\n", + "\n", + "$$\n", + "p_{a,z} = \\frac{E_0^2 - m_p^2}{2E_0}\n", + "$$\n", + "\n", + "Finally, this gives us\n", + "\n", + "$$\n", + "\\begin{array}{rcl}\n", + "p_{b,z} &=& -p_{a,z} \\\\\n", + "E_a &=& p_a \\\\\n", + "E_{b} &=& \\sqrt{m_b^2+p_b^2} \\,.\n", + "\\end{array}\n", + "$$ (pb-pa-relation)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "With this, our numerical implementation to compute four-momentum of $p_a$ and $p_b$ from Equation {eq}`pb-pa-relation` becomes:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "tags": [] + }, + "outputs": [], + "source": [ + "def compute_pa_pb(\n", + " p1: MomentumNumpy4D, p2: MomentumNumpy4D, p3: MomentumNumpy4D\n", + ") -> tuple[MomentumNumpy4D, MomentumNumpy4D]:\n", + " shape = p1.shape\n", + " E0 = (p1 + p2 + p3).e\n", + " px = np.zeros(shape)\n", + " py = np.zeros(shape)\n", + " pz = np.ones(shape) * (E0**2 - p3.m.mean() ** 2) / (2 * E0)\n", + " E = np.ones(shape) * np.sqrt(p3.m.mean() ** 2 + pz.mean() ** 2)\n", + " pa 
= MomentumNumpy4D({\"E\": pz, \"px\": px, \"py\": py, \"pz\": pz})\n", + " pb = MomentumNumpy4D({\"E\": E, \"px\": px, \"py\": py, \"pz\": -pz})\n", + " return pa, pb" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "#### Four-momenta of all particles" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "We now create a function `generate_phsp_all()` to create and compute all particles in phase space all-at-once, combining the two functions from the previous sections into one function." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def generate_phsp_all(\n", + " size: int, seed: int | None = None, bunch_size: int = 500_000\n", + ") -> tuple[\n", + " MomentumNumpy4D, MomentumNumpy4D, MomentumNumpy4D, MomentumNumpy4D, MomentumNumpy4D\n", + "]:\n", + " p1, p2, p3 = generate_phsp_decay(size, seed, bunch_size)\n", + " pa, pb = compute_pa_pb(p1, p2, p3)\n", + " return p1, p2, p3, pa, pb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "%%time\n", + "p1_phsp, p2_phsp, p3_phsp, pa_phsp, pb_phsp = generate_phsp_all(\n", + " size=phsp_events, seed=42, bunch_size=1_000_000\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "### Kinematic variable calculation" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "#### Spherical coordinate system" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "Before introducing CM and helicity angles, we first introduce **polar angles** and **azimuthal angles** in spherical coordinates." 
The Mandelstam variable $s$ of each of the three subsystems can be easily computed from the four-momentum objects as follows.
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "p12_phsp = p1_phsp + p2_phsp\n", + "p23_phsp = p2_phsp + p3_phsp\n", + "p31_phsp = p3_phsp + p1_phsp\n", + "s12_phsp = p12_phsp.m2\n", + "s23_phsp = p23_phsp.m2\n", + "s31_phsp = p31_phsp.m2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "tags": [ + "hide-input", + "remove-output" + ] + }, + "outputs": [], + "source": [ + "%config InlineBackend.figure_formats = ['png']\n", + "fig = plt.figure(figsize=(12, 5))\n", + "fig.suptitle(\"Phase Space Dalitz Plot\")\n", + "gs = gridspec.GridSpec(1, 3, width_ratios=[1, 1, 0.05]) # The last column for colorbar\n", + "ax1 = plt.subplot(gs[0])\n", + "ax2 = plt.subplot(gs[1])\n", + "cax = plt.subplot(gs[2]) # For colorbar\n", + "\n", + "hist2 = ax2.hist2d(\n", + " s12_phsp,\n", + " s23_phsp,\n", + " bins=100,\n", + " cmin=1e-2,\n", + " density=True,\n", + " cmap=\"jet\",\n", + " range=[[0, 10.3], [0.5, 13]],\n", + ")\n", + "ax2.set_title(\"2D Histogram\")\n", + "ax2.set_xlabel(R\"$m^2(\\eta \\pi^0)\\;\\left[\\mathrm{GeV}^2\\right]$\")\n", + "\n", + "ax1.scatter(s12_phsp, s23_phsp, s=1e-4, c=\"black\", norm=mpl.colors.Normalize())\n", + "ax1.set_xlabel(R\"$m^2(\\eta \\pi^0)\\;\\left[\\mathrm{GeV}^2\\right]$\")\n", + "ax1.set_ylabel(R\"$m^2(\\pi^0 p)\\;\\left[\\mathrm{GeV}^2\\right]$\")\n", + "ax1.set_title(\"Scatter Plot\")\n", + "\n", + "fig.colorbar(hist2[3], cax=cax)\n", + "fig.tight_layout()\n", + "fig.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![](https://github.com/user-attachments/assets/88379dc9-235a-428c-a8e1-d0bcff6d8648)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + ":::{tip}\n", + "There are different ways to represent the Dalitz plot, each with its advantages.\n", + "\n", + "- *Scatter Plot*: This method plots individual events as points, 
offering a clear view of the density and distribution of events within the phase space. It is particularly useful for visualizing smaller datasets or when high resolution is needed to identify specific features or clusters.\n", + "\n", + "- *2D Histogram*: This approach divides the phase space into bins and counts the number of events within each bin, representing the density of events using a color scale. It is effective for large datasets, providing a smooth and continuous representation of the phase space that highlights overall trends and structures.\n", + "\n", + "In conclusion, to check if the phase space sample is evenly distributed, a scatter plot is typically more straightforward and visually clear.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "#### CM Angles " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "Angles in the CM frame are the polar and azimuthal angles of the spatial components of the four-momenta in the CM frame (i.e. the frame that satisfies the relations in Equation {eq}`four-momentum-conservation`). They are different than the [helicity angles](#helicity-angles) in each subsystem (which is after rotation and boost into the subsystem)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "The values for phase space can be obtained directly in the CM frame, without boosting into a different frame after the generation.\n", + "We denote these angles as\n", + "\n", + "$$\n", + "\\theta_1^\\text{CM}, \\theta_2^\\text{CM}, \\theta_3^\\text{CM}, \\phi_1^\\text{CM}, \\phi_2^\\text{CM}, \\phi_3^\\text{CM} \\,.\n", + "$$" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "theta1_CM_phsp = p1_phsp.theta\n", + "theta2_CM_phsp = p2_phsp.theta\n", + "theta3_CM_phsp = p3_phsp.theta\n", + "phi1_CM_phsp = p1_phsp.phi\n", + "phi2_CM_phsp = p2_phsp.phi\n", + "phi3_CM_phsp = p3_phsp.phi" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "#### Helicity angles" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + ":::{seealso}\n", + "- [Section of helicity formalism in **TR-015** (Spin alignment implementation)](https://compwa.github.io/report/015.html#helicity-formalism)\n", + "- [Helicity versus canonical in Ampform](https://ampform.readthedocs.io/stable/usage/helicity/formalism.html)\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "The amplitude model shown in Equation {eq}`model-dynamics-and-angular` derives from the helicity formalism. This spin formalism breaks up the transition amplitudes of a decay chain into two-body decays (**isobar model**), which allows separating angular (spin) dependence from dynamics (e.g. Breit–Wigner).\n", + "\n", + "Crucially, the helicity formalism builds on a chain of boosts and rotations that align the reference frame of each two-body decay node such that the particle moves along the $z$ direction. 
As can be seen in the amplitude model of Equation {eq}`model-dynamics-and-angular`, the rotations (represented by $Y^m_l$) contain solid angles $\\Omega=\\phi,\\theta$ (see [spherical coordinates](#spherical-coordinate-system)).
The **decay plane** (magenta) is defined by the momenta of the decay products of a particle. In the figure, the decay plane is spanned by the momenta of $1$ and $2$.
The distribution of the phase space sample over the kinematic variables is shown later in [this section](#my_section), together with the generated data and weighted phase space (model).
The toy model parameter values can be obtained from the data file from the [Lecture 11 in STRONG2020 HaSP School](https://indico.ific.uv.es/event/6803/contributions/21223/). In this tutorial, the values are modified to make the structures in the Dalitz plot more visible." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "toy_parameters = dict(\n", + " a_minus2=0,\n", + " a_minus1=0.5,\n", + " a_0=3.5,\n", + " a_plus1=4,\n", + " a_plus2=2.5,\n", + " b_minus1=-1.5,\n", + " b_0=4,\n", + " b_plus1=0.5,\n", + " c_0=2.5,\n", + " M12=1.32,\n", + " Gamma12=0.1,\n", + " M23=1.24 + 0.3,\n", + " Gamma23=0.1,\n", + " M31=1.57 + 0.3,\n", + " Gamma31=0.1,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "Note that the masses $(M)$ and widths $(\\Gamma)$ are properties of the three resonances.\n", + "\n", + "$$\n", + "\\begin{array}{rcl}\n", + "M_{12} &=& M_{\\eta\\pi^0} = M_{a_2} \\\\\n", + "M_{23} &=& M_{\\pi^0p} = M_{\\Delta^+} \\\\\n", + "M_{31} &=& M_{\\eta p} = M_{N^*} \\\\\n", + "\\Gamma_{12} &=& \\Gamma_{\\eta\\pi^0} = \\Gamma_{a_2} \\\\\n", + "\\Gamma_{23} &=& \\Gamma_{\\pi^0p} = \\Gamma_{\\Delta^+} \\\\\n", + "\\Gamma_{31} &=& \\Gamma_{\\eta p} = \\Gamma_{N^*} \\\\\n", + "\\end{array}\n", + "$$ (mass-and-width-parameters)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "(spherical-harmonics-visualization)=\n", + "### Spherical harmonics" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "We now have a look at the real part and imaginary part of $\\sum a_m Y_2^m (\\Omega_1)$ as well as $\\sum b_m Y_1^m (\\Omega_2)$ in Equation {eq}`model-dynamics-and-angular`. For this, we define a grid of values for $\\phi$ and $\\theta$ over which to visualize the amplitudes." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "PHI, THETA = np.meshgrid(\n", + " np.linspace(-np.pi, +np.pi, num=1_000),\n", + " np.linspace(0, np.pi, num=1_000),\n", + ")\n", + "Z12 = Ylm12(PHI, THETA, **toy_parameters)\n", + "Z23 = Ylm23(PHI, THETA, **toy_parameters)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "tags": [ + "hide-input", + "remove-output" + ] + }, + "outputs": [], + "source": [ + "%config InlineBackend.figure_formats = ['png']\n", + "fig, axes = plt.subplots(figsize=(10, 4), ncols=2, sharey=True, dpi=120)\n", + "cmap_real = axes[0].pcolormesh(\n", + " np.degrees(PHI), np.degrees(THETA), Z12.real, cmap=plt.cm.coolwarm\n", + ")\n", + "cmap_imag = axes[1].pcolormesh(\n", + " np.degrees(PHI), np.degrees(THETA), Z12.imag, cmap=plt.cm.coolwarm\n", + ")\n", + "\n", + "axes[0].set_xlabel(R\"$\\phi$ [deg]\")\n", + "axes[0].set_ylabel(R\"$\\theta$ [deg]\")\n", + "axes[0].set_title(R\"Re($\\sum a_m Y_2^m (\\Omega_1)$)\")\n", + "axes[0].set_ylabel(R\"$\\theta$ [deg]\")\n", + "axes[1].set_xlabel(R\"$\\phi$ [deg]\")\n", + "axes[1].set_title(R\"Im($\\sum a_m Y_2^m (\\Omega_1)$)\")\n", + "\n", + "cbar_real = fig.colorbar(cmap_real, ax=axes[0])\n", + "cbar_imag = fig.colorbar(cmap_imag, ax=axes[1])\n", + "\n", + "fig.subplots_adjust(wspace=0.4, hspace=0.4)\n", + "fig.tight_layout()\n", + "plt.rcParams.update({\"font.size\": 10})\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "full-width" + ] + }, + "source": [ + "![](https://github.com/user-attachments/assets/95592061-ce3f-4e57-8c35-0c2202e0d662)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "tags": [ + "hide-input", + "remove-output" + ] + }, + "outputs": [], + "source": [ + "%config InlineBackend.figure_formats = ['png']\n", + 
"fig, axes = plt.subplots(figsize=(10, 4), ncols=2, sharey=True, dpi=120)\n", + "cmap_real = axes[0].pcolormesh(\n", + " np.degrees(PHI), np.degrees(THETA), Z23.real, cmap=plt.cm.coolwarm\n", + ")\n", + "cmap_imag = axes[1].pcolormesh(\n", + " np.degrees(PHI), np.degrees(THETA), Z23.imag, cmap=plt.cm.coolwarm\n", + ")\n", + "\n", + "axes[0].set_xlabel(R\"$\\phi$ [deg]\")\n", + "axes[0].set_ylabel(R\"$\\theta$ [deg]\")\n", + "axes[0].set_title(R\"Re($\\sum b_m Y_1^m (\\Omega_2)$)\")\n", + "axes[0].set_ylabel(R\"$\\theta$ [deg]\")\n", + "axes[1].set_xlabel(R\"$\\phi$ [deg]\")\n", + "axes[1].set_title(R\"Im($\\sum b_m Y_1^m (\\Omega_2)$)\")\n", + "\n", + "cbar_real = fig.colorbar(cmap_real, ax=axes[0])\n", + "cbar_imag = fig.colorbar(cmap_imag, ax=axes[1])\n", + "\n", + "fig.subplots_adjust(wspace=0.4, hspace=0.4)\n", + "fig.tight_layout()\n", + "plt.rcParams.update({\"font.size\": 10})\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "full-width" + ] + }, + "source": [ + "![](https://github.com/user-attachments/assets/cdcd343f-21f8-4faf-a7ee-ef10f8bd8036)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "### Dalitz Plots of (sub)models" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "Dalitz plots are ideal for visualizing the model over the Mandelstam variables $s_{12}$, $s_{23}$, and $s_{31}$. In the following, we plot each of the 'sub-models' separately: Breit-Wigner (only), Spherical Harmonics (only), and Breit-Wigner $\\times$ Spherical Harmonics.\n", + "\n", + "To integrate out the model dependence on the helicity angles, we plot the Dalitz plots as a histogram over the phase space sample, taking the computed intensities for each event in the sample as weight." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "mystnb": { + "code_prompt_show": "Compute intensities from models for plotting histograms" + }, + "tags": [ + "hide-cell", + "hide-output", + "hide-input" + ] + }, + "outputs": [], + "source": [ + "BW_intensities = BW_model(\n", + " s12_phsp,\n", + " s23_phsp,\n", + " s31_phsp,\n", + " **toy_parameters,\n", + ")\n", + "SH_intensities = SH_model(\n", + " phi1_phsp,\n", + " theta1_phsp,\n", + " phi2_phsp,\n", + " theta2_phsp,\n", + " **toy_parameters,\n", + ")\n", + "BW_SH_intensities = BW_SH_model(\n", + " s12_phsp,\n", + " s23_phsp,\n", + " s31_phsp,\n", + " phi1_phsp,\n", + " theta1_phsp,\n", + " phi2_phsp,\n", + " theta2_phsp,\n", + " **toy_parameters,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-input", + "scroll-input", + "remove-output" + ] + }, + "outputs": [], + "source": [ + "%config InlineBackend.figure_formats = ['png']\n", + "fig, axes = plt.subplots(figsize=(12, 4), ncols=3, sharey=True)\n", + "ax1, ax2, ax3 = axes\n", + "fig.suptitle(\"Dalitz Plots of sub-models\")\n", + "ax1.set_title(\"BW model only\")\n", + "ax2.set_title(\"SH model only\")\n", + "ax3.set_title(R\"BW $\\times$ SH model\")\n", + "for ax in axes:\n", + " ax.set_xlabel(R\"$m^2(\\eta \\pi^0)$\")\n", + " ax.set_ylabel(R\"$m^2(\\pi^0 p)$\")\n", + "\n", + "hist1 = ax1.hist2d(\n", + " s12_phsp,\n", + " s23_phsp,\n", + " bins=100,\n", + " cmap=\"jet\",\n", + " cmin=1e-6,\n", + " density=True,\n", + " weights=BW_intensities,\n", + ")\n", + "hist2 = ax2.hist2d(\n", + " s12_phsp,\n", + " s23_phsp,\n", + " bins=100,\n", + " cmap=\"jet\",\n", + " cmin=1e-6,\n", + " density=True,\n", + " weights=SH_intensities,\n", + ")\n", + "hist3 = ax3.hist2d(\n", + " s12_phsp,\n", + " s23_phsp,\n", + " bins=100,\n", + " cmap=\"jet\",\n", + " cmin=1e-6,\n", + " density=True,\n", + " weights=BW_SH_intensities,\n", + ")\n", + "\n", + "fig.colorbar(hist1[3], ax=ax1)\n", 
The following function generates a data sample based on a model by using a hit-and-miss filter applied to the phase space sample. The output is a {obj}`tuple` of five four-momenta $(p_1, p_2, p_3, p_a, p_b)$.
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "tags": [ + "scroll-input" + ] + }, + "outputs": [], + "source": [ + "def generate_data(\n", + " model: callable, size: int, bunch_size: int = 500_000, seed: int | None = None\n", + ") -> tuple[MomentumNumpy4D, ...]:\n", + " rng = np.random.default_rng(seed=seed)\n", + " generated_seed = rng.integers(low=0, high=1_000_000)\n", + " progress_bar = tqdm(total=size)\n", + " phase_space = generate_phsp_all(bunch_size, generated_seed)\n", + " data_sample = _hit_and_miss_filter(model, phase_space, generated_seed)\n", + " progress_bar.n = min(get_size(data_sample), size)\n", + " progress_bar.update(n=0)\n", + " while get_size_data(data_sample) < size:\n", + " phase_space = generate_phsp_all(bunch_size, generated_seed)\n", + " bunch = _hit_and_miss_filter(model, phase_space)\n", + " data_sample = concatenate_data(data_sample, bunch)\n", + " progress_bar.n = min(get_size(data_sample), size)\n", + " progress_bar.update(n=0)\n", + " progress_bar.close()\n", + " return remove_overflow_data(data_sample, size)\n", + "\n", + "\n", + "def _hit_and_miss_filter(\n", + " model: callable, phase_space: tuple[MomentumNumpy4D, ...], seed: int | None = None\n", + ") -> tuple[MomentumNumpy4D, ...]:\n", + " p1, p2, p3, pa, pb = phase_space\n", + " p12 = p1 + p2\n", + " p23 = p2 + p3\n", + " p31 = p3 + p1\n", + "\n", + " intensities: np.ndarray = model(\n", + " s12=p12.m2,\n", + " s23=p23.m2,\n", + " s31=p31.m2,\n", + " phi1=phi_helicity(p1, p12),\n", + " theta1=theta_helicity(p1, p12),\n", + " phi2=phi_helicity(p2, p23),\n", + " theta2=theta_helicity(p2, p23),\n", + " **toy_parameters,\n", + " )\n", + " rng = np.random.default_rng(seed=seed) # FIX seed is used here for reproducibility\n", + " random_intensities = rng.uniform(0, intensities.max(), size=intensities.shape)\n", + " selector = intensities > random_intensities\n", + " return (\n", + " p1[selector],\n", + " 
p2[selector],\n", + " p3[selector],\n", + " pa[selector],\n", + " pb[selector],\n", + " )\n", + "\n", + "\n", + "def get_size_data(\n", + " data: tuple[\n", + " MomentumNumpy4D,\n", + " MomentumNumpy4D,\n", + " MomentumNumpy4D,\n", + " MomentumNumpy4D,\n", + " MomentumNumpy4D,\n", + " ],\n", + "):\n", + " return len(data[0])\n", + "\n", + "\n", + "def concatenate_data(\n", + " data1: tuple[MomentumNumpy4D, ...],\n", + " data2: tuple[MomentumNumpy4D, ...],\n", + ") -> tuple[MomentumNumpy4D, ...]:\n", + " return tuple(concatenate_vectors((pi1, pj2)) for pi1, pj2 in zip(data1, data2))\n", + "\n", + "\n", + "def concatenate_vectors(vectors: tuple[MomentumNumpy4D]) -> MomentumNumpy4D:\n", + " return vector.array({\n", + " \"px\": np.concatenate([p.px for p in vectors]),\n", + " \"py\": np.concatenate([p.py for p in vectors]),\n", + " \"pz\": np.concatenate([p.pz for p in vectors]),\n", + " \"E\": np.concatenate([p.e for p in vectors]),\n", + " })\n", + "\n", + "\n", + "def remove_overflow_data(\n", + " data: tuple[MomentumNumpy4D, ...], size: int\n", + ") -> tuple[MomentumNumpy4D, ...]:\n", + " return tuple(momentum[:size] for momentum in data)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + ":::{admonition} Hit-and-miss data generation\n", + ":class: dropdown\n", + "\n", + "A few functions are defined above for generating data sample from filtering phase space samples of particle decays and converting them into four-momentum vectors. 
The main ideas of them are:\n", + "\n", + "- `generate_data()`: The main function used to generate the data\n", + " - It simulates events until it gathers a specified number of valid data points that meet the model’s conditions.\n", + " - Parameters:\n", + " - model: A callable that computes the intensity of events based on given kinematic variables.\n", + " - size: The desired number of valid events to generate.\n", + " - bunch_size: The number of events to generate per iteration if the initial batch doesn't meet the required size. Defaults to size if not provided.\n", + " - seed: Seed for random number generation to ensure reproducibility.\n", + " - Process:\n", + " - Initialize a random number generator.\n", + " - Generate an initial set of phase space data (phase_space) and filter it using the model to obtain valid events (data_sample).\n", + " - If the filtered data (data_sample) is less than the desired size, additional data is generated in batches (bunch_size) and filtered until the size requirement is met.\n", + " - The function ensures the returned dataset size matches exactly the requested size.\n", + " \n", + "\n", + "- `_hit_and_miss_filter`: The other important function that filters the generated phase space data\n", + " - using the hit-and-miss Monte Carlo method based on the model intensities.\n", + " - Parameters:\n", + " - model: The model function to calculate intensities.\n", + " - phase_space: Tuple containing momentum vectors of particles involved in the event.\n", + " - seed: Seed for random number generation.\n", + " - Process:\n", + " - Calculate invariant masses and other kinematic variables required by the model.\n", + " - Call the model with these variables to compute intensities for each event.\n", + " - Generate random numbers and select events where the model intensity is greater than a random threshold, simulating a probability proportional to the intensity.\n", + " \n", + "There are other assisted functions that are used in the 
functions above:\n", + "\n", + "- `get_size_data`:\n", + "\n", + " - Simply returns the number of events in the data, used to check if more data needs to be generated.\n", + "\n", + "- `concatenate_data`:\n", + " - Merges two datasets into one, maintaining the structure of tuples of momentum vectors.\n", + "\n", + "- `concatenate_vectors`:\n", + " - Helper function to concatenate individual momentum vectors within the data tuples.\n", + "\n", + "- `remove_overflow_data`:\n", + " - Trims the dataset to the desired size if it exceeds the specified number due to batch generation.\n", + "\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "Now we can use the function `generate_data()` to generate our data sample:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "data_events = 100_000\n", + "data = generate_data(BW_SH_model, data_events, seed=0, bunch_size=1_000_000)\n", + "p1_data, p2_data, p3_data, pa_data, pb_data = data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "mystnb": { + "code_prompt_show": "Convert four-momenta to kinematic variables" + }, + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "p12_data = p1_data + p2_data\n", + "p23_data = p2_data + p3_data\n", + "p31_data = p3_data + p1_data\n", + "\n", + "s12_data = p12_data.m2\n", + "s23_data = p23_data.m2\n", + "s31_data = p31_data.m2\n", + "\n", + "theta1_CM_data = p1_data.theta\n", + "theta2_CM_data = p2_data.theta\n", + "theta3_CM_data = p3_data.theta\n", + "phi1_CM_data = p1_data.phi\n", + "phi2_CM_data = p2_data.phi\n", + "phi3_CM_data = p3_data.phi\n", + "\n", + "theta1_data = theta_helicity(p1_data, p12_data)\n", + "theta2_data = theta_helicity(p2_data, p23_data)\n", + "theta3_data = theta_helicity(p3_data, p31_data)\n", + "phi1_data = phi_helicity(p1_data, p12_data)\n", + 
"phi2_data = phi_helicity(p2_data, p23_data)\n", + "phi3_data = phi_helicity(p3_data, p31_data)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "### Dalitz plot of data distribution" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "tags": [ + "hide-input", + "remove-output" + ] + }, + "outputs": [], + "source": [ + "%config InlineBackend.figure_formats = ['png']\n", + "fig, ax = plt.subplots(figsize=(6, 5))\n", + "vmax = 0.15\n", + "fig.suptitle(f\"Dalitz plot of generated data (with cut at max={vmax})\")\n", + "hist = ax.hist2d(\n", + " s12_data,\n", + " s23_data,\n", + " bins=100,\n", + " cmin=1e-20,\n", + " density=True,\n", + " cmap=\"jet\",\n", + " vmax=vmax,\n", + ")\n", + "ax.set_xlabel(r\"$m^2_{\\eta \\pi^0}$\")\n", + "ax.set_ylabel(r\"$m^2_{\\pi^0 p}$\")\n", + "cbar = fig.colorbar(hist[3], ax=ax)\n", + "fig.tight_layout()\n", + "fig.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "![](https://github.com/user-attachments/assets/5f22c929-3ea8-49c9-a9ce-5db175b829f1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "(my_section)=\n", + "### 1D projections" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "The 1D projection distribution of CM angles, the helicity angles and invariant mass of the model, phase space, and data are shown in this section." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "tags": [ + "hide-input", + "scroll-input", + "remove-output" + ] + }, + "outputs": [], + "source": [ + "%config InlineBackend.figure_formats = ['svg']\n", + "thetaCM_subtitles = [\n", + " R\"$\\cos (\\theta_{1}^{{CM}}) \\equiv \\cos (\\theta_{\\eta}^{{CM}})$\",\n", + " R\"$\\cos (\\theta_{2}^{{CM}}) \\equiv \\cos (\\theta_{\\pi^0}^{{CM}})$\",\n", + " R\"$\\cos (\\theta_{3}^{{CM}}) \\equiv \\cos (\\theta_{p}^{{CM}})$\",\n", + "]\n", + "phiCM_subtitles = [\n", + " R\"$\\phi_1^{CM} \\equiv \\phi_{\\eta}^{CM}$\",\n", + " R\"$\\phi_2^{CM} \\equiv \\phi_{\\pi^0}^{CM}$\",\n", + " R\"$\\phi_3^{CM} \\equiv \\phi_{p}^{CM}$\",\n", + "]\n", + "theta_subtitles = [\n", + " R\"$\\cos (\\theta_{1}^{{12}}) \\equiv \\cos (\\theta_{\\eta}^{{\\eta \\pi^0}})$\",\n", + " R\"$\\cos (\\theta_{2}^{{23}}) \\equiv \\cos (\\theta_{\\pi^0}^{{\\pi^0 p}})$\",\n", + " R\"$\\cos (\\theta_{3}^{{31}}) \\equiv \\cos (\\theta_{p}^{{p \\eta}})$\",\n", + "]\n", + "phi_subtitles = [\n", + " R\"$\\phi_1^{12} \\equiv \\phi_{\\eta}^{{\\eta \\pi^0}}$\",\n", + " R\"$\\phi_2^{23} \\equiv \\phi_{\\pi^0}^{{\\pi^0 p}}$\",\n", + " R\"$\\phi_3^{31} \\equiv \\phi_{p}^{{p \\eta}}$\",\n", + "]\n", + "mass_subtitles = [\n", + " R\"$m_{12} \\equiv m_{\\eta \\pi^0}$\",\n", + " R\"$m_{23} \\equiv m_{\\pi^0 p}$\",\n", + " R\"$m_{31} \\equiv m_{p \\eta}$\",\n", + "]\n", + "\n", + "fig, (thetaCM_ax, phiCM_ax, theta_ax, phi_ax, mass_ax) = plt.subplots(\n", + " figsize=(13, 18), ncols=3, nrows=5\n", + ")\n", + "for i, ax1 in enumerate(thetaCM_ax, 1):\n", + " ax1.set_title(thetaCM_subtitles[i - 1])\n", + " ax1.set_xticks([-1, 0, 1])\n", + "for i, ax2 in enumerate(phiCM_ax, 1):\n", + " ax2.set_title(phiCM_subtitles[i - 1])\n", + " ax2.set_xticks([-np.pi, 0, np.pi])\n", + " ax2.set_xticklabels([R\"-$\\pi$\", 0, R\"$\\pi$\"])\n", + "\n", + "for i, ax3 in enumerate(theta_ax, 1):\n", + " 
ax3.set_title(theta_subtitles[i - 1])\n", + " ax3.set_xticks([-1, 0, 1])\n", + "\n", + "for i, ax4 in enumerate(phi_ax, 1):\n", + " ax4.set_title(phi_subtitles[i - 1])\n", + " ax4.set_xticks([-np.pi, 0, np.pi])\n", + " ax4.set_xticklabels([R\"-$\\pi$\", 0, R\"$\\pi$\"])\n", + "\n", + "for i, ax5 in enumerate(mass_ax, 1):\n", + " ax5.set_title(mass_subtitles[i - 1])\n", + "\n", + "thetaCM_ax[0].hist(\n", + " np.cos(theta1_CM_phsp),\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + ")\n", + "thetaCM_ax[1].hist(\n", + " np.cos(theta2_CM_phsp),\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + ")\n", + "thetaCM_ax[2].hist(\n", + " np.cos(theta3_CM_phsp),\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + ")\n", + "thetaCM_ax[0].hist(\n", + " np.cos(theta1_CM_phsp),\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " color=\"orange\",\n", + " histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "thetaCM_ax[1].hist(\n", + " np.cos(theta2_CM_phsp),\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " color=\"orange\",\n", + " histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "thetaCM_ax[2].hist(\n", + " np.cos(theta3_CM_phsp),\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " color=\"orange\",\n", + " histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "thetaCM_ax[0].hist(\n", + " np.cos(theta1_CM_phsp),\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", + " density=True,\n", + ")\n", + "thetaCM_ax[1].hist(\n", + " np.cos(theta2_CM_phsp),\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", 
+ " density=True,\n", + ")\n", + "thetaCM_ax[2].hist(\n", + " np.cos(theta3_CM_phsp),\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", + " density=True,\n", + ")\n", + "thetaCM_ax[0].hist(\n", + " np.cos(theta1_CM_phsp),\n", + " bins=100,\n", + " weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "thetaCM_ax[1].hist(\n", + " np.cos(theta2_CM_phsp),\n", + " bins=100,\n", + " weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "thetaCM_ax[2].hist(\n", + " np.cos(theta3_CM_phsp),\n", + " bins=100,\n", + " weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "\n", + "\n", + "phiCM_ax[0].hist(\n", + " phi1_CM_phsp,\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + ")\n", + "phiCM_ax[1].hist(\n", + " phi2_CM_phsp,\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + ")\n", + "phiCM_ax[2].hist(\n", + " phi3_CM_phsp,\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + ")\n", + "phiCM_ax[0].hist(\n", + " phi1_CM_phsp,\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " color=\"orange\",\n", + " histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "phiCM_ax[1].hist(\n", + " phi2_CM_phsp,\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " color=\"orange\",\n", + " histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "phiCM_ax[2].hist(\n", + " phi3_CM_phsp,\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " color=\"orange\",\n", + " 
histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "phiCM_ax[0].hist(\n", + " phi1_CM_phsp,\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", + " density=True,\n", + ")\n", + "phiCM_ax[1].hist(\n", + " phi2_CM_phsp,\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", + " density=True,\n", + ")\n", + "phiCM_ax[2].hist(\n", + " phi3_CM_phsp,\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", + " density=True,\n", + ")\n", + "phiCM_ax[0].hist(\n", + " phi1_CM_phsp,\n", + " bins=100,\n", + " weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "phiCM_ax[1].hist(\n", + " phi2_CM_phsp,\n", + " bins=100,\n", + " weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "phiCM_ax[2].hist(\n", + " phi3_CM_phsp,\n", + " bins=100,\n", + " weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "\n", + "\n", + "theta_ax[0].hist(\n", + " np.cos(theta1_phsp),\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + ")\n", + "theta_ax[1].hist(\n", + " np.cos(theta2_phsp),\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + ")\n", + "theta_ax[2].hist(\n", + " np.cos(theta3_phsp),\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + ")\n", + "theta_ax[0].hist(\n", + " np.cos(theta1_phsp),\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " 
color=\"orange\",\n", + " histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "theta_ax[1].hist(\n", + " np.cos(theta2_phsp),\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " color=\"orange\",\n", + " histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "theta_ax[2].hist(\n", + " np.cos(theta3_phsp),\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " color=\"orange\",\n", + " histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "theta_ax[0].hist(\n", + " np.cos(theta1_phsp),\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", + " density=True,\n", + ")\n", + "theta_ax[1].hist(\n", + " np.cos(theta2_phsp),\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", + " density=True,\n", + ")\n", + "theta_ax[2].hist(\n", + " np.cos(theta3_phsp),\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", + " density=True,\n", + ")\n", + "theta_ax[0].hist(\n", + " np.cos(theta1_phsp),\n", + " bins=100,\n", + " weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "theta_ax[1].hist(\n", + " np.cos(theta2_phsp),\n", + " bins=100,\n", + " weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "theta_ax[2].hist(\n", + " np.cos(theta3_phsp),\n", + " bins=100,\n", + " weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "\n", + "\n", + "phi_ax[0].hist(\n", + " phi1_phsp,\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + 
")\n", + "phi_ax[1].hist(\n", + " phi2_phsp,\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + ")\n", + "phi_ax[2].hist(\n", + " phi3_phsp,\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + ")\n", + "phi_ax[0].hist(\n", + " phi1_phsp,\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " color=\"orange\",\n", + " histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "phi_ax[1].hist(\n", + " phi2_phsp,\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " color=\"orange\",\n", + " histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "phi_ax[2].hist(\n", + " phi3_phsp,\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " color=\"orange\",\n", + " histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "phi_ax[0].hist(\n", + " phi1_phsp,\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", + " density=True,\n", + ")\n", + "phi_ax[1].hist(\n", + " phi2_phsp,\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", + " density=True,\n", + ")\n", + "phi_ax[2].hist(\n", + " phi3_phsp,\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", + " density=True,\n", + ")\n", + "phi_ax[0].hist(\n", + " phi1_phsp,\n", + " bins=100,\n", + " weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "phi_ax[1].hist(\n", + " phi2_phsp,\n", + " bins=100,\n", + " weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "phi_ax[2].hist(\n", + " phi3_phsp,\n", 
+ " bins=100,\n", + " weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "\n", + "\n", + "mass_ax[0].hist(\n", + " p12_phsp.m,\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + ")\n", + "mass_ax[1].hist(\n", + " p23_phsp.m,\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + ")\n", + "mass_ax[2].hist(\n", + " p31_phsp.m,\n", + " bins=100,\n", + " color=\"black\",\n", + " histtype=\"step\",\n", + " label=\"phsp\",\n", + " density=True,\n", + ")\n", + "mass_ax[0].hist(\n", + " p12_phsp.m,\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " color=\"orange\",\n", + " histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "mass_ax[1].hist(\n", + " p23_phsp.m,\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " color=\"orange\",\n", + " histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "mass_ax[2].hist(\n", + " p31_phsp.m,\n", + " bins=100,\n", + " weights=BW_intensities,\n", + " color=\"orange\",\n", + " histtype=\"step\",\n", + " label=\"only BW\",\n", + " density=True,\n", + ")\n", + "mass_ax[0].hist(\n", + " p12_phsp.m,\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", + " density=True,\n", + ")\n", + "mass_ax[1].hist(\n", + " p23_phsp.m,\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", + " density=True,\n", + ")\n", + "mass_ax[2].hist(\n", + " p31_phsp.m,\n", + " bins=100,\n", + " weights=SH_intensities,\n", + " color=\"green\",\n", + " histtype=\"step\",\n", + " label=\"only SH\",\n", + " density=True,\n", + ")\n", + "mass_ax[0].hist(\n", + " p12_phsp.m,\n", + " bins=100,\n", + " 
weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "mass_ax[1].hist(\n", + " p23_phsp.m,\n", + " bins=100,\n", + " weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "mass_ax[2].hist(\n", + " p31_phsp.m,\n", + " bins=100,\n", + " weights=BW_SH_intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + ")\n", + "\n", + "\n", + "thetaCM_ax[0].hist(\n", + " np.cos(theta1_CM_data),\n", + " bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + " alpha=0.5,\n", + ")\n", + "thetaCM_ax[1].hist(\n", + " np.cos(theta2_CM_data),\n", + " bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + " alpha=0.5,\n", + ")\n", + "thetaCM_ax[2].hist(\n", + " np.cos(theta3_CM_data),\n", + " bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + " alpha=0.5,\n", + ")\n", + "\n", + "phiCM_ax[0].hist(\n", + " phi1_CM_data,\n", + " bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + " alpha=0.5,\n", + ")\n", + "phiCM_ax[1].hist(\n", + " phi2_CM_data,\n", + " bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + " alpha=0.5,\n", + ")\n", + "phiCM_ax[2].hist(\n", + " phi3_CM_data,\n", + " bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + " alpha=0.5,\n", + ")\n", + "\n", + "theta_ax[0].hist(\n", + " np.cos(theta1_data),\n", + " bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + ")\n", + "theta_ax[1].hist(\n", + " np.cos(theta2_data),\n", + " bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + ")\n", + "theta_ax[2].hist(\n", + " np.cos(theta3_data),\n", + " bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + ")\n", + "\n", + "phi_ax[0].hist(\n", + " phi1_data,\n", + " bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + ")\n", + "phi_ax[1].hist(\n", + " phi2_data,\n", + " 
bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + ")\n", + "phi_ax[2].hist(\n", + " phi3_data,\n", + " bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + ")\n", + "\n", + "mass_ax[0].hist(\n", + " p12_data.m,\n", + " bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + ")\n", + "mass_ax[1].hist(\n", + " p23_data.m,\n", + " bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + ")\n", + "mass_ax[2].hist(\n", + " p31_data.m,\n", + " bins=100,\n", + " label=\"data\",\n", + " density=True,\n", + ")\n", + "\n", + "thetaCM_ax[0].legend()\n", + "phiCM_ax[0].legend()\n", + "thetaCM_ax[1].legend()\n", + "phiCM_ax[1].legend()\n", + "thetaCM_ax[2].legend()\n", + "phiCM_ax[2].legend()\n", + "\n", + "theta_ax[0].legend()\n", + "phi_ax[0].legend()\n", + "theta_ax[1].legend()\n", + "phi_ax[1].legend()\n", + "theta_ax[2].legend()\n", + "phi_ax[2].legend()\n", + "\n", + "mass_ax[0].legend()\n", + "mass_ax[1].legend()\n", + "mass_ax[2].legend()\n", + "\n", + "fig.tight_layout()\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "full-width" + ] + }, + "source": [ + "![](https://github.com/user-attachments/assets/ced99aea-2fb4-4198-9e41-6b3377c5af23)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "(fit-model)=\n", + "## Fitting " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "We now pretend that we do not know the parameter values for the data distribution. In this particular situation, the parameters (to be fitted) are mass $M$ ($M_{12}$, $M_{23}$, and $M_{31}$) and decay width $\\Gamma$ ($\\Gamma_{12}$, $\\Gamma_{23}$, and $\\Gamma_{31}$). We also guess the coefficient values, but note that we keep one coefficient 'fixed'. The reason is that the model is normalized through the likelihood estimator, so what matters for the fits are the _ratios_ between the parameters." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "### Guessed parameter values" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "guessed_parameters = dict(\n", + " M12=1.4,\n", + " Gamma12=0.15,\n", + " M23=1.59,\n", + " Gamma23=0.05,\n", + " M31=1.82,\n", + " Gamma31=0.2,\n", + " # a_minus2 is set to fixed and not changed during fitting\n", + " a_minus1=0.51,\n", + " a_0=3.49,\n", + " a_plus1=4.08,\n", + " a_plus2=2.6,\n", + " b_minus1=-1.2,\n", + " b_0=4.04,\n", + " b_plus1=0.41,\n", + " c_0=2.66,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "mystnb": { + "code_prompt_show": "Function for plotting model" + }, + "tags": [ + "hide-input", + "scroll-input" + ] + }, + "outputs": [], + "source": [ + "def compare_model_to_data(parameters: dict):\n", + " fig, axes = plt.subplots(figsize=(13, 12), ncols=3, nrows=5)\n", + " fig.suptitle(\n", + " R\"CM angles, Helicity angles and invariant mass (Before fitting: starting\"\n", + " R\" parameters for BW $\\times$ SH model)\",\n", + " y=1,\n", + " )\n", + "\n", + " thetaCM_ax, phiCM_ax, theta_ax, phi_ax, mass_ax = axes\n", + " for i, ax in enumerate(thetaCM_ax, 1):\n", + " ax.set_title(thetaCM_subtitles[i - 1])\n", + " ax.set_xticks([-1, 0, 1])\n", + " for i, ax in enumerate(phiCM_ax, 1):\n", + " ax.set_title(phiCM_subtitles[i - 1])\n", + " ax.set_xticks([-np.pi, 0, np.pi])\n", + " ax.set_xticklabels([R\"-$\\pi$\", 0, R\"$\\pi$\"])\n", + " for i, ax in enumerate(theta_ax, 1):\n", + " ax.set_title(theta_subtitles[i - 1])\n", + " ax.set_xticks([-1, 0, 1])\n", + " for i, ax in enumerate(phi_ax, 1):\n", + " ax.set_title(phi_subtitles[i - 1])\n", + " ax.set_xticks([-np.pi, 0, np.pi])\n", + " ax.set_xticklabels([R\"-$\\pi$\", 0, R\"$\\pi$\"])\n", + " for i, ax in enumerate(mass_ax, 1):\n", + " ax.set_title(mass_subtitles[i 
- 1])\n", + "\n", + " data_kwargs = dict(bins=100, label=\"data\", density=True)\n", + " data_phi_kwargs = dict(bins=50, label=\"data\", density=True)\n", + "\n", + " thetaCM_ax[0].hist(np.cos(theta1_CM_data), **data_kwargs, alpha=0.5)\n", + " thetaCM_ax[1].hist(np.cos(theta2_CM_data), **data_kwargs, alpha=0.5)\n", + " thetaCM_ax[2].hist(np.cos(theta3_CM_data), **data_kwargs, alpha=0.5)\n", + " theta_ax[0].hist(np.cos(theta1_data), **data_kwargs)\n", + " theta_ax[1].hist(np.cos(theta2_data), **data_kwargs)\n", + " theta_ax[2].hist(np.cos(theta3_data), **data_kwargs)\n", + " phiCM_ax[0].hist(phi1_CM_data, **data_phi_kwargs, alpha=0.5)\n", + " phiCM_ax[1].hist(phi2_CM_data, **data_phi_kwargs, alpha=0.5)\n", + " phiCM_ax[2].hist(phi3_CM_data, **data_phi_kwargs, alpha=0.5)\n", + " phi_ax[0].hist(phi1_data, **data_phi_kwargs)\n", + " phi_ax[1].hist(phi2_data, **data_phi_kwargs)\n", + " phi_ax[2].hist(phi3_data, **data_phi_kwargs)\n", + " mass_ax[0].hist(p12_data.m, **data_kwargs)\n", + " mass_ax[1].hist(p23_data.m, **data_kwargs)\n", + " mass_ax[2].hist(p31_data.m, **data_kwargs)\n", + "\n", + " intensities = BW_SH_model(\n", + " s12_phsp,\n", + " s23_phsp,\n", + " s31_phsp,\n", + " phi1_phsp,\n", + " theta1_phsp,\n", + " phi2_phsp,\n", + " theta2_phsp,\n", + " **{**toy_parameters, **parameters},\n", + " )\n", + " model_kwargs = dict(\n", + " bins=100,\n", + " weights=intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + " )\n", + " model_phi_kwargs = dict(\n", + " bins=50,\n", + " weights=intensities,\n", + " color=\"red\",\n", + " histtype=\"step\",\n", + " label=\"BW&SH\",\n", + " density=True,\n", + " )\n", + " thetaCM_ax[0].hist(np.cos(theta1_CM_phsp), **model_kwargs)\n", + " thetaCM_ax[1].hist(np.cos(theta2_CM_phsp), **model_kwargs)\n", + " thetaCM_ax[2].hist(np.cos(theta3_CM_phsp), **model_kwargs)\n", + " phiCM_ax[0].hist(phi1_CM_phsp, **model_phi_kwargs)\n", + " phiCM_ax[1].hist(phi2_CM_phsp, 
**model_phi_kwargs)\n", + " phiCM_ax[2].hist(phi3_CM_phsp, **model_phi_kwargs)\n", + " theta_ax[0].hist(np.cos(theta1_phsp), **model_kwargs)\n", + " theta_ax[1].hist(np.cos(theta2_phsp), **model_kwargs)\n", + " theta_ax[2].hist(np.cos(theta3_phsp), **model_kwargs)\n", + " phi_ax[0].hist(phi1_phsp, **model_phi_kwargs)\n", + " phi_ax[1].hist(phi2_phsp, **model_phi_kwargs)\n", + " phi_ax[2].hist(phi3_phsp, **model_phi_kwargs)\n", + " mass_ax[0].hist(p12_phsp.m, **model_kwargs)\n", + " mass_ax[1].hist(p23_phsp.m, **model_kwargs)\n", + " mass_ax[2].hist(p31_phsp.m, **model_kwargs)\n", + "\n", + " for ax in axes.flatten():\n", + " ax.legend()\n", + " fig.tight_layout()\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-input", + "remove-output" + ] + }, + "outputs": [], + "source": [ + "%config InlineBackend.figure_formats = ['svg']\n", + "compare_model_to_data(guessed_parameters)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "full-width" + ] + }, + "source": [ + "![](https://github.com/user-attachments/assets/cfe00d5b-4cc1-451c-850e-ac298a548ba7)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "### Estimator: likelihood function" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "To optimize the model parameters so that the model 'fits' the data distribution, we need to define a measure for the 'distance' between the model to the data. The common choice for complicated, multidimensional models like an amplitude model is the **unbinned negative log likelihood function** (NLL)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def unbinned_nll(**parameters: float) -> float:\n", + " phsp = (\n", + " s12_phsp,\n", + " s23_phsp,\n", + " s31_phsp,\n", + " phi1_phsp,\n", + " theta1_phsp,\n", + " phi2_phsp,\n", + " theta2_phsp,\n", + " )\n", + " data = (\n", + " s12_data,\n", + " s23_data,\n", + " s31_data,\n", + " phi1_data,\n", + " theta1_data,\n", + " phi2_data,\n", + " theta2_data,\n", + " )\n", + " model_integral = BW_SH_model(*phsp, **parameters).mean()\n", + " data_intensities = BW_SH_model(*data, **parameters)\n", + " likelihoods = data_intensities / model_integral\n", + " log_likelihood = np.log(likelihoods).sum()\n", + " return -log_likelihood" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + ":::{tip}\n", + "The phase space (phsp) is used for the normalization calculation for the unbinned log likelihood function\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "### Optimizer: Minuit" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "As a final step, we find the parameter values that minimize the unbinned Log likelihood function, i.e., we optimize the model with respect to the data distribution. In Python, this can be performed by e.g. [`iminuit`](https://scikit-hep.org/iminuit) package." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "arg_names = tuple(guessed_parameters)\n", + "progress_bar = None\n", + "\n", + "\n", + "def wrapped_estimator(*args: float) -> float:\n", + " # minuit migrad expects positional-argument functions\n", + " global progress_bar\n", + " if progress_bar is None:\n", + " progress_bar = tqdm(desc=\"Running fit\")\n", + " new_kwargs = {\n", + " **toy_parameters,\n", + " **dict(zip(arg_names, args)),\n", + " }\n", + " estimator_value = unbinned_nll(**new_kwargs)\n", + " progress_bar.set_postfix({\"estimator\": estimator_value})\n", + " progress_bar.update()\n", + " return estimator_value" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "optimizer = Minuit(\n", + " wrapped_estimator,\n", + " **guessed_parameters,\n", + " name=guessed_parameters,\n", + ")\n", + "optimizer.errordef = Minuit.LIKELIHOOD" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "keep_output" + ] + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "e1f140d4a81e41ec956d036a306f86b6", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Running fit: 0it [00:00, ?it/s]" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "optimizer.migrad()\n", + "progress_bar.close()\n", + "progress_bar = None # reset progress bar" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "### Final fit result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "tags": [ + "scroll-input", + "hide-input", + "keep_output" + ] + }, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
ParameterToy valueStarting valueOptimized valueDifference
0a_minus20.00NaNNaN0.0%
1a_minus10.500.510.5183163.7%
2a_03.503.493.6431714.1%
3a_plus14.004.084.1723944.3%
4a_plus22.502.602.6714726.9%
5b_minus1-1.50-1.20-1.600444-6.7%
6b_04.004.044.1207373.0%
7b_plus10.500.410.57866015.7%
8c_02.502.662.5925613.7%
9M121.321.401.3203590.0%
10Gamma120.100.150.1005160.5%
11M231.541.591.5399560.0%
12Gamma230.100.050.0989871.0%
13M311.871.821.8699460.0%
14Gamma310.100.200.1003070.3%
\n", + "
" + ], + "text/plain": [ + " Parameter Toy value Starting value Optimized value Difference\n", + "0 a_minus2 0.00 NaN NaN 0.0%\n", + "1 a_minus1 0.50 0.51 0.518316 3.7%\n", + "2 a_0 3.50 3.49 3.643171 4.1%\n", + "3 a_plus1 4.00 4.08 4.172394 4.3%\n", + "4 a_plus2 2.50 2.60 2.671472 6.9%\n", + "5 b_minus1 -1.50 -1.20 -1.600444 -6.7%\n", + "6 b_0 4.00 4.04 4.120737 3.0%\n", + "7 b_plus1 0.50 0.41 0.578660 15.7%\n", + "8 c_0 2.50 2.66 2.592561 3.7%\n", + "9 M12 1.32 1.40 1.320359 0.0%\n", + "10 Gamma12 0.10 0.15 0.100516 0.5%\n", + "11 M23 1.54 1.59 1.539956 0.0%\n", + "12 Gamma23 0.10 0.05 0.098987 1.0%\n", + "13 M31 1.87 1.82 1.869946 0.0%\n", + "14 Gamma31 0.10 0.20 0.100307 0.3%" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "optimized_parameters = {p.name: p.value for p in optimizer.params}\n", + "pd.DataFrame({\n", + " \"Parameter\": list(toy_parameters.keys()),\n", + " \"Toy value\": list(toy_parameters.values()),\n", + " \"Starting value\": [guessed_parameters.get(name) for name in toy_parameters],\n", + " \"Optimized value\": [optimized_parameters.get(name) for name in toy_parameters],\n", + " \"Difference\": [\n", + " f\"{100 * abs(value - optimized_parameters.get(name, value)) / value if value else 0:.1f}%\"\n", + " for name, value in toy_parameters.items()\n", + " ],\n", + "})" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "Now we see the fit results of CM angles, Helicity angles and invariant mass as a comparison of data in 1D projection plots" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-input", + "remove-output" + ] + }, + "outputs": [], + "source": [ + "%config InlineBackend.figure_formats = ['svg']\n", + "compare_model_to_data(optimized_parameters)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + 
"![](https://github.com/user-attachments/assets/f34d1cbc-81c0-4535-bf94-5346bc4df00e)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "#### Dalitz plots of phsp, data, and fit result" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "To conclude our analysis in this document, we examine the Dalitz plots for the following cases: phase space, generated data, default parameters of the model, and fitted parameters of the model.\n", + "\n", + "- Phase Space Dalitz Plot:\n", + " - The plot displays a flat distribution, representing the kinematically allowed region for the reaction. This flat distribution serves as a baseline, showing no dynamical effects or resonant structures.\n", + "- Generated Data Dalitz Plot:\n", + " - This plot is derived from synthetic data that simulates the actual experimental conditions. It includes potential resonances (and background processes). The generated data reflects the physical phenomena expected in the real experimental scenario.\n", + "- Model with Default Parameters Dalitz Plot:\n", + " - Here, the Dalitz plot illustrates the model's predictions using the initial set of parameters. This provides a theoretical reference for comparison with actual data and the fitted model.\n", + "- Fitted Parameters Dalitz Plot:\n", + " - After performing the fitting procedure, this plot shows the model's predictions with optimized parameters. The resonances and their characteristics are clearly visible, indicating the physical information extracted from the data.\n", + " \n", + "By comparing these Dalitz plots, we can observe how the model evolves from theoretical predictions to a fitted solution that aligns with the experimental or generated data. 
The physical information about possible resonances becomes more apparent, allowing us to extract meaningful insights into the underlying processes of the reaction.\n", + "\n", + "This comprehensive analysis showcases the interplay between kinematics and dynamics in understanding particle interactions, highlighting the power of Partial Wave Analysis in uncovering the nuances of hadronic reactions." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "jupyter": { + "source_hidden": true + }, + "tags": [ + "hide-input", + "scroll-input", + "remove-output" + ] + }, + "outputs": [], + "source": [ + "%config InlineBackend.figure_formats = ['png']\n", + "fig, (ax1, ax2) = plt.subplots(figsize=(12, 5), ncols=2, sharey=True)\n", + "fig.suptitle(R\"Dalitz Plots of parameter defaults and fitted parameters\")\n", + "\n", + "hist1 = ax1.hist2d(\n", + " s12_phsp,\n", + " s23_phsp,\n", + " bins=100,\n", + " cmap=\"jet\",\n", + " cmin=1e-6,\n", + " density=True,\n", + " vmax=0.15,\n", + " weights=BW_SH_intensities,\n", + ")\n", + "ax1.set_title(R\"Parameters defaults BW $\\times$ SH model (with cut at max=0.15)\")\n", + "ax1.set_xlabel(R\"$m^2(\\eta \\pi)$\")\n", + "ax1.set_ylabel(R\"$m^2(\\pi p)$\")\n", + "\n", + "BW_SH_intensities_fitted = BW_SH_model(\n", + " s12_phsp,\n", + " s23_phsp,\n", + " s31_phsp,\n", + " phi1_phsp,\n", + " theta1_phsp,\n", + " phi2_phsp,\n", + " theta2_phsp,\n", + " **{**toy_parameters, **optimized_parameters},\n", + ")\n", + "hist2 = ax2.hist2d(\n", + " s12_phsp,\n", + " s23_phsp,\n", + " bins=100,\n", + " cmap=\"jet\",\n", + " cmin=1e-6,\n", + " density=True,\n", + " vmax=0.15,\n", + " weights=BW_SH_intensities_fitted,\n", + ")\n", + "ax2.set_title(R\"Fitted parameters BW $\\times$ SH model (with cut at max=0.15)\")\n", + "ax2.set_xlabel(R\"$m^2(\\eta \\pi)$\")\n", + "ax2.set_ylabel(R\"$m^2(\\pi p)$\")\n", + "\n", + "cbar1 = fig.colorbar(hist1[3], ax=ax1)\n", + "cbar2 = fig.colorbar(hist2[3], ax=ax2)\n", + "cbar3 = 
None  # removed: this figure has only two axes (ax1, ax2); hist3/ax3 are not defined\n", + "\n", + "fig.tight_layout()\n", + "fig.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "![](https://github.com/user-attachments/assets/9e002900-9e84-414f-bf42-003cc2cf5968)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "## Summary" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "In this document, we have covered the general workflow of PWA, which consists of three major parts:\n", + "\n", + "- Amplitude Model formulation\n", + " - Introduced the theoretical model for the amplitude of the reaction.\n", + " - Defined the mathematical expressions representing the partial waves and their respective contributions.\n", + " - Incorporated the relevant physical parameters, such as resonance properties. \n", + "- Phase space and data sample generation (or data analysis)\n", + " - Generated phase space distributions for the three-body decay processes.\n", + " - Created synthetic data samples or analyzed experimental data.\n", + " - Ensured that the data accurately reflects the kinematic constraints and dynamics of the reaction. \n", + "- Fitting, analysis, and evaluation of the results\n", + " - Performed a fit of the amplitude model to the generated or experimental data.\n", + " - Used statistical techniques to optimize the model parameters.\n", + " - Analyzed the fit results to extract physical parameters.\n", + " - Evaluated the quality of the fit and the consistency of the model with the data.\n", + " - Interpreted the physical significance of the extracted parameters and identified potential resonances and their properties."
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "This structured approach provides a comprehensive understanding of both the reaction dynamics and kinematics, and helps in extracting meaningful physical insights from the data." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + ":::{tip} Extra\n", + "Have a look at [another example of PWA](https://tensorwaves.readthedocs.io/stable/amplitude-analysis/), for $J/\\psi \\to \\gamma \\pi^0 \\pi^0$ with `ComPWA`.\n", + ":::" + ] + } + ], + "metadata": { + "colab": { + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/pyproject.toml b/pyproject.toml index 2430865f..5992c7d0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -271,6 +271,7 @@ split-on-trailing-comma = false "PLW0602", "PLW0603", "PLW2901", + "RUF001", "RUF027", # for _latex_repr_ "S101", "S307", diff --git a/tox.ini b/tox.ini index 4a169cd3..ef8c3fb4 100644 --- a/tox.ini +++ b/tox.ini @@ -21,6 +21,7 @@ description = passenv = * setenv = FORCE_COLOR = 1 + ZFIT_DISABLE_TF_WARNINGS = 1 [testenv:doclive] allowlist_externals = @@ -42,6 +43,8 @@ commands = --re-ignore docs/.*\.yaml \ --re-ignore docs/.*\.yml \ --re-ignore docs/_build/.* \ + --re-ignore docs/_static/exported_intensity_model.py \ + --re-ignore docs/report/data/* \ --watch docs \ docs/ docs/_build/html description = @@ -49,6 +52,7 @@ description = passenv = * setenv = FORCE_COLOR = 1 + ZFIT_DISABLE_TF_WARNINGS = 1 [testenv:docnb] allowlist_externals = @@ -66,6 +70,8 @@ passenv = * setenv = EXECUTE_NB = yes 
FORCE_COLOR = 1 + TF_CPP_MIN_LOG_LEVEL = 3 + ZFIT_DISABLE_TF_WARNINGS = 1 [testenv:docnblive] allowlist_externals = @@ -87,6 +93,8 @@ commands = --re-ignore docs/.*\.yaml \ --re-ignore docs/.*\.yml \ --re-ignore docs/_build/.* \ + --re-ignore docs/_static/exported_intensity_model.py \ + --re-ignore docs/report/data/* \ --watch docs \ docs/ docs/_build/html description = @@ -95,6 +103,8 @@ passenv = * setenv = EXECUTE_NB = yes FORCE_COLOR = 1 + TF_CPP_MIN_LOG_LEVEL = 3 + ZFIT_DISABLE_TF_WARNINGS = 1 [testenv:docnb-force] allowlist_externals = @@ -107,7 +117,8 @@ passenv = * setenv = FORCE_COLOR = 1 FORCE_EXECUTE_NB = yes - PYTHONHASHSEED = 0 + TF_CPP_MIN_LOG_LEVEL = 3 + ZFIT_DISABLE_TF_WARNINGS = 1 [testenv:jcache] allowlist_externals =