Skip to content

Commit

Permalink
Merge pull request #1261 from cta-observatory/energy_systematics_in_irfs
Browse files Browse the repository at this point in the history
Allow for scaling true energy in IRFs to assess energy-scale systematics
  • Loading branch information
rlopezcoto authored Jul 8, 2024
2 parents 27ce931 + e999002 commit ef91c64
Show file tree
Hide file tree
Showing 4 changed files with 153 additions and 9 deletions.
1 change: 1 addition & 0 deletions docs/examples/irf_dl3_tool_config.json
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@
"true_energy_min": 0.005,
"true_energy_max": 500,
"true_energy_n_bins": 25,
"scale_true_energy": 1.0,
"reco_energy_min": 0.005,
"reco_energy_max": 500,
"reco_energy_n_bins": 25,
Expand Down
5 changes: 5 additions & 0 deletions lstchain/io/event_selection.py
Original file line number Diff line number Diff line change
Expand Up @@ -364,6 +364,11 @@ class DataBinning(Component):
default_value=25,
).tag(config=True)

# Multiplicative factor applied to MC true energy before filling IRF
# histograms (1.0 means no scaling); used to assess energy-scale systematics.
scale_true_energy = Float(
    help="Scaling value for True energy",
    default_value=1.0,
).tag(config=True)

reco_energy_min = Float(
help="Minimum value for Reco Energy bins in TeV units",
default_value=0.005,
Expand Down
28 changes: 26 additions & 2 deletions lstchain/tools/lstchain_create_irf_files.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,15 @@
If you want to generate source-dependent IRFs, source-dep flag should be activated.
The global alpha cut used to generate IRFs is stored as AL_CUT in the HDU header.
Modified IRFs with true energy scaled by a given factor can be created to evaluate
the systematic uncertainty in the light collection efficiency. This can be done by
setting a value different from one for the "scale_true_energy" argument present in
the DataBinning Component of the configuration file of the IRF creation Tool.
(The true energy of the MC events is scaled before the IRF histograms are
filled by the pyirf routines. The expected effects are a non-diagonal energy
dispersion matrix and a modified spectrum.)
"""

from astropy import table
Expand Down Expand Up @@ -136,7 +145,12 @@ class IRFFITSWriter(Tool):
--global-alpha-cut 10
--source-dep
"""
To build modified IRFs by specifying a scaling factor applied to the true energy (without using a config file):
> lstchain_create_irf_files
-g /path/to/DL2_MC_gamma_file.h5
-o /path/to/irf.fits.gz
--scale-true-energy 1.15
"""

input_gamma_dl2 = traits.Path(
help="Input MC gamma DL2 file",
Expand Down Expand Up @@ -221,6 +235,7 @@ class IRFFITSWriter(Tool):
"global-alpha-cut": "DL3Cuts.global_alpha_cut",
"allowed-tels": "DL3Cuts.allowed_tels",
"overwrite": "IRFFITSWriter.overwrite",
"scale-true-energy": "DataBinning.scale_true_energy"
}

flags = {
Expand Down Expand Up @@ -317,6 +332,12 @@ def start(self):
p["geomag_params"],
) = read_mc_dl2_to_QTable(p["file"])


if self.data_bin.scale_true_energy != 1.0:
p["events"]["true_energy"] *= self.data_bin.scale_true_energy
p["simulation_info"].energy_min *= self.data_bin.scale_true_energy
p["simulation_info"].energy_max *= self.data_bin.scale_true_energy

p["mc_type"] = check_mc_type(p["file"])

self.log.debug(
Expand Down Expand Up @@ -527,7 +548,10 @@ def start(self):
geomag_params["GEOMAG_DELTA"].to_value(u.deg),
"deg",
)

# Record the applied true-energy scale factor in the IRF header so that
# scaled (systematics-study) IRFs can be identified downstream.
extra_headers["ETRUE_SCALE"] = self.data_bin.scale_true_energy

if self.point_like:
self.log.info("Generating point_like IRF HDUs")
else:
Expand Down
128 changes: 121 additions & 7 deletions lstchain/tools/tests/test_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -381,11 +381,16 @@ def test_index_dl3_files(temp_dir_observed_files):
assert 2008 in data.obs_table["OBS_ID"]

for hdu_name in [
'EVENTS', 'GTI', 'POINTING',
'EFFECTIVE AREA', 'ENERGY DISPERSION',
'BACKGROUND', 'PSF'
"EVENTS",
"GTI",
"POINTING",
"EFFECTIVE AREA",
"ENERGY DISPERSION",
"BACKGROUND",
"PSF",
]:
assert hdu_name in data.hdu_table['HDU_NAME']
assert hdu_name in data.hdu_table["HDU_NAME"]


@pytest.mark.private_data
def test_index_srcdep_dl3_files(temp_dir_observed_srcdep_files):
Expand All @@ -411,7 +416,116 @@ def test_index_srcdep_dl3_files(temp_dir_observed_srcdep_files):
assert 2008 in data.obs_table["OBS_ID"]

for hdu_name in [
'EVENTS', 'GTI', 'POINTING',
'EFFECTIVE AREA', 'ENERGY DISPERSION'
"EVENTS",
"GTI",
"POINTING",
"EFFECTIVE AREA",
"ENERGY DISPERSION",
]:
assert hdu_name in data.hdu_table['HDU_NAME']
assert hdu_name in data.hdu_table["HDU_NAME"]


@pytest.mark.private_data
def test_add_scale_true_energy_in_irfs(temp_dir_observed_files, simulated_dl2_file):
    """
    Check the validity of modified IRFs after scaling the true energy by a factor.

    Two IRF files are built from the same DL2 input, one with
    ``scale_true_energy=1.15`` and one without. The scaled IRFs must keep the
    same binning (shapes), while the migration value of maximum energy
    dispersion probability must shift (non-diagonal dispersion matrix).
    """

    import astropy.units as u
    from gammapy.irf import EffectiveAreaTable2D, EnergyDispersion2D
    from lstchain.tools.lstchain_create_irf_files import IRFFITSWriter

    irf_file = temp_dir_observed_files / "fe_irf.fits.gz"
    irf_file_mod = temp_dir_observed_files / "mod_irf.fits.gz"
    config_file = os.path.join(os.getcwd(), "docs/examples/irf_dl3_tool_config.json")

    # Arguments shared by both tool invocations. Note traitlets CLI options
    # must use the "name=value" form, not "name: value".
    common_argv = [
        f"--input-gamma-dl2={simulated_dl2_file}",
        f"--input-proton-dl2={simulated_dl2_file}",
        f"--input-electron-dl2={simulated_dl2_file}",
        f"--config={config_file}",
        "--overwrite",
        "--DataBinning.true_energy_n_bins=2",
        "--DataBinning.reco_energy_n_bins=2",
        "--DataBinning.true_energy_min=0.2",
        "--DataBinning.true_energy_max=0.3",
        "--DL3Cuts.min_event_p_en_bin=2",
    ]

    # Reference IRFs (no energy scaling).
    assert (
        run_tool(
            IRFFITSWriter(),
            argv=[f"--output-irf-file={irf_file}"] + common_argv,
            cwd=temp_dir_observed_files,
        )
        == 0
    )
    # Modified IRFs with true energy scaled by 1.15.
    assert (
        run_tool(
            IRFFITSWriter(),
            argv=[
                f"--output-irf-file={irf_file_mod}",
                "--DataBinning.scale_true_energy=1.15",
            ]
            + common_argv,
            cwd=temp_dir_observed_files,
        )
        == 0
    )

    aeff_hdu = EffectiveAreaTable2D.read(irf_file, hdu="EFFECTIVE AREA")
    aeff_mod_hdu = EffectiveAreaTable2D.read(irf_file_mod, hdu="EFFECTIVE AREA")

    edisp_hdu = EnergyDispersion2D.read(irf_file, hdu="ENERGY DISPERSION")
    edisp_mod_hdu = EnergyDispersion2D.read(irf_file_mod, hdu="ENERGY DISPERSION")

    # Scaling the true energy must not change the IRF binning, only contents.
    assert aeff_mod_hdu.data.shape == aeff_hdu.data.shape
    assert edisp_mod_hdu.data.shape == edisp_hdu.data.shape

    edisp = EnergyDispersion2D.read(irf_file)
    edisp_mod = EnergyDispersion2D.read(irf_file_mod)

    e_migra = edisp.axes["migra"].center
    e_migra_mod = edisp_mod.axes["migra"].center

    # Check that the maximum of the migration probability density has shifted
    # for each probed true energy.
    for e_true in [0.2, 2, 20] * u.TeV:
        e_migra_prob = edisp.evaluate(
            offset=0.4 * u.deg,
            energy_true=e_true,
            migra=e_migra,
        )
        e_migra_prob_mod = edisp_mod.evaluate(
            offset=0.4 * u.deg,
            energy_true=e_true,
            migra=e_migra_mod,
        )

        # Index of the migration bin with the highest probability density.
        idx_max = max(range(len(e_migra)), key=lambda j: e_migra_prob[j])
        idx_max_mod = max(
            range(len(e_migra_mod)), key=lambda j: e_migra_prob_mod[j]
        )
        assert idx_max != idx_max_mod

0 comments on commit ef91c64

Please sign in to comment.