diff --git a/.github/workflows/BTA_workflow.yml b/.github/workflows/BTA_workflow.yml index f76f1956..059af75d 100644 --- a/.github/workflows/BTA_workflow.yml +++ b/.github/workflows/BTA_workflow.yml @@ -87,9 +87,9 @@ jobs: - name: BTA workflow test run: | - python runner.py --wf BTA --json metadata/test_bta_run3.json --campaign Summer22Run3 --executor iterative --isJERC + python runner.py --wf BTA --json metadata/test_bta_run3.json --campaign Summer22Run3 --executor iterative - name: BTA_ttbar workflow test run: | - python runner.py --wf BTA_ttbar --json metadata/test_bta_run3.json --campaign Summer22Run3 --executor iterative --isJERC + python runner.py --wf BTA_ttbar --json metadata/test_bta_run3.json --campaign Summer22Run3 --executor iterative diff --git a/.github/workflows/ctag_DY_workflow.yml b/.github/workflows/ctag_DY_workflow.yml index 282381a3..b835ea95 100644 --- a/.github/workflows/ctag_DY_workflow.yml +++ b/.github/workflows/ctag_DY_workflow.yml @@ -86,14 +86,14 @@ jobs: - name: Compile JEC run: | - python -m BTVNanoCommissioning.utils.compile_jec 2017_UL jec_compiled + python -m BTVNanoCommissioning.utils.compile_jec Summer22Run3 jec_compiled - name: ctag muon DY workflows with correctionlib run: | - python runner.py --workflow ctag_DY_sf --json metadata/nano_UL17_test.json --limit 1 --executor futures -j 6 --campaign 2017_UL --isCorr --isJERC --isSyst all --isArray --year 2017 --max 5 --chunk 50000 + python runner.py --workflow ctag_DY_sf --json metadata/test_bta_run3.json --limit 1 --executor futures -j 6 --campaign Summer22Run3 --isSyst all --isArray --year 2022 - name: ctag electron DY workflows with correctionlib run: | - python runner.py --workflow ectag_DY_sf --json metadata/nano_UL17_test.json --limit 1 --executor futures -j 6 --campaign 2017_UL --isCorr --isJERC --isSyst all --isArray --year 2017 --max 5 --chunk 50000 + python runner.py --workflow ectag_DY_sf --json metadata/test_bta_run3.json --limit 1 --executor futures -j 6 --campaign Summer22Run3 --isSyst all --isArray --year 2022 diff --git a/.github/workflows/ctag_Wc_workflow.yml b/.github/workflows/ctag_Wc_workflow.yml index 49c99b10..8a00fb52 100644 --- a/.github/workflows/ctag_Wc_workflow.yml +++ b/.github/workflows/ctag_Wc_workflow.yml @@ -88,16 +88,16 @@ jobs: - name: Compile JEC run: | - python -m BTVNanoCommissioning.utils.compile_jec 2017_UL jec_compiled + python -m BTVNanoCommissioning.utils.compile_jec Summer22Run3 jec_compiled - name: ctag muon W+c workflows with correctionlib run: | - python runner.py --workflow ctag_Wc_sf --json metadata/nano_UL17_test.json --limit 1 --executor futures -j 6 --campaign 2017_UL --isCorr --isJERC --isSyst all --isArray --max 5 --chunk 50000 + python runner.py --workflow ctag_Wc_sf --json metadata/test_bta_run3.json --limit 1 --executor futures -j 6 --campaign Summer22Run3 --isSyst all --isArray - name: ctag electron W+c workflows with correctionlib run: | - python runner.py --workflow ectag_Wc_sf --json metadata/nano_UL17_test.json --limit 1 --executor futures -j 6 --campaign 2017_UL --isCorr --isJERC --isSyst all --isArray --max 5 --chunk 50000 + python runner.py --workflow ectag_Wc_sf --json metadata/test_bta_run3.json --limit 1 --executor futures -j 6 --campaign Summer22Run3 --isSyst all --isArray diff --git a/.github/workflows/ttbar_DL_workflow.yml b/.github/workflows/ttbar_DL_workflow.yml index 5e68a415..781547f1 100644 --- a/.github/workflows/ttbar_DL_workflow.yml +++ b/.github/workflows/ttbar_DL_workflow.yml @@ -87,19 +87,19 @@ jobs: - name: Compile JEC run: | - 
python -m BTVNanoCommissioning.utils.compile_jec 2017_UL jec_compiled + python -m BTVNanoCommissioning.utils.compile_jec Summer22Run3 jec_compiled - name: btag dileptonic ttbar workflows with correctionlib run: | - python runner.py --workflow ttdilep_sf --json metadata/nano_UL17_test.json --limit 1 --executor futures -j 6 --campaign 2017_UL --isCorr --isJERC --isSyst all --isArray --max 5 --chunk 50000 + python runner.py --workflow ttdilep_sf --json metadata/test_bta_run3.json --limit 1 --executor futures -j 6 --campaign Summer22Run3 --isSyst all --isArray - name: ctag dileptonic muon ttbar workflows with correctionlib run: | - python runner.py --workflow ctag_ttdilep_sf --json metadata/nano_UL17_test.json --limit 1 --executor futures -j 6 --campaign 2017_UL --isCorr --isJERC --isSyst all --isArray --max 5 --chunk 50000 + python runner.py --workflow ctag_ttdilep_sf --json metadata/test_bta_run3.json --limit 1 --executor futures -j 6 --campaign Summer22Run3 --isSyst all --isArray - name: ctag dileptonic electron ttbar workflows with correctionlib run: | - python runner.py --workflow ectag_ttdilep_sf --json metadata/nano_UL17_test.json --limit 1 --executor futures -j 6 --campaign 2017_UL --isCorr --isJERC --isSyst all --isArray --max 5 --chunk 50000 + python runner.py --workflow ectag_ttdilep_sf --json metadata/test_bta_run3.json --limit 1 --executor futures -j 6 --campaign Summer22Run3 --isSyst all --isArray - name: ctag dileptonic emu ttbar workflows with correctionlib run: | - python runner.py --workflow emctag_ttdilep_sf --json metadata/nano_UL17_test.json --limit 1 --executor futures -j 6 --campaign 2017_UL --isCorr --isJERC --isSyst all --isArray --max 5 --chunk 50000 + python runner.py --workflow emctag_ttdilep_sf --json metadata/test_bta_run3.json --limit 1 --executor futures -j 6 --campaign Summer22Run3 --isSyst all --isArray diff --git a/.github/workflows/ttbar_SL_workflow.yml b/.github/workflows/ttbar_SL_workflow.yml index d48efebc..953627a5 100644 --- a/.github/workflows/ttbar_SL_workflow.yml +++ b/.github/workflows/ttbar_SL_workflow.yml @@ -87,19 +87,19 @@ jobs: - name: Compile JEC run: | - python -m BTVNanoCommissioning.utils.compile_jec 2017_UL jec_compiled + python -m BTVNanoCommissioning.utils.compile_jec Summer22Run3 jec_compiled - name: btag semileptonic ttbar workflows with correctionlib run: | - python runner.py --workflow ttsemilep_sf --json metadata/nano_UL17_test.json --limit 1 --executor futures -j 6 --campaign 2017_UL --isCorr --isJERC --isSyst all --isArray --max 5 --chunk 50000 + python runner.py --workflow ttsemilep_sf --json metadata/test_bta_run3.json --limit 1 --executor futures -j 6 --campaign Summer22Run3 --isSyst all --isArray - name: ctag semileptonic muon ttbar workflows with correctionlib run: | - python runner.py --workflow ctag_ttsemilep_sf --json metadata/nano_UL17_test.json --limit 1 --executor futures -j 6 --campaign 2017_UL --isCorr --isJERC --isSyst all --isArray --max 5 --chunk 50000 + python runner.py --workflow ctag_ttsemilep_sf --json metadata/test_bta_run3.json --limit 1 --executor futures -j 6 --campaign Summer22Run3 --isSyst all --isArray - name: ctag semileptonic electron ttbar workflows with correctionlib run: | - python runner.py --workflow ectag_ttsemilep_sf --json metadata/nano_UL17_test.json --limit 1 --executor futures -j 6 --campaign 2017_UL --isCorr --isJERC --isSyst all --isArray --max 5 --chunk 50000 + python runner.py --workflow ectag_ttsemilep_sf --json metadata/test_bta_run3.json --limit 1 --executor futures -j 6 --campaign 
Summer22Run3 --isSyst all --isArray diff --git a/README.md b/README.md index 92e7d37e..85c7a6a4 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ More options for `runner.py`

``` ---wf {validation,ttcom,ttdilep_sf,ttsemilep_sf,emctag_ttdilep_sf,ctag_ttdilep_sf,ectag_ttdilep_sf,ctag_ttsemilep_sf,ectag_ttsemilep_sf,ctag_Wc_sf,ectag_Wc_sf,ctag_DY_sf,ectag_DY_sf,BTA,BTA_addPFMuons,BTA_addAllTracks}, --workflow {validation,ttcom,ttdilep_sf,ttsemilep_sf,emctag_ttdilep_sf,ctag_ttdilep_sf,ectag_ttdilep_sf,ctag_ttsemilep_sf,ectag_ttsemilep_sf,ctag_Wc_sf,ectag_Wc_sf,ctag_DY_sf,ectag_DY_sf,BTA,BTA_addPFMuons,BTA_addAllTracks} +--wf {validation,ttcom,ttdilep_sf,ttsemilep_sf,emctag_ttdilep_sf,ctag_ttdilep_sf,ectag_ttdilep_sf,ctag_ttsemilep_sf,ectag_ttsemilep_sf,ctag_Wc_sf,ectag_Wc_sf,ctag_DY_sf,ectag_DY_sf,BTA,BTA_addPFMuons,BTA_addAllTracks,BTA_ttbar}, --workflow {validation,ttcom,ttdilep_sf,ttsemilep_sf,emctag_ttdilep_sf,ctag_ttdilep_sf,ectag_ttdilep_sf,ctag_ttsemilep_sf,ectag_ttsemilep_sf,ctag_Wc_sf,ectag_Wc_sf,ctag_DY_sf,ectag_DY_sf,BTA,BTA_addPFMuons,BTA_addAllTracks,BTA_ttbar} Which processor to run -o OUTPUT, --output OUTPUT Output histogram filename (default: hists.coffea) @@ -73,14 +73,18 @@ More options for `runner.py` --year YEAR Year --campaign CAMPAIGN Dataset campaign, change the corresponding correction files{ "Rereco17_94X","Winter22Run3","Summer22Run3","Summer22EERun3","2018_UL","2017_UL","2016preVFP_UL","2016postVFP_UL"} - --isCorr Run with SFs - --isJERC JER/JEC implemented to jet --isSyst Run with systematics, all, weights_only(no JERC uncertainties included),JERC_split, None(not extract) --isArray Output root files --noHist Not save histogram coffea files --overwrite Overwrite existing files - --executor {iterative,futures,parsl/slurm,parsl/condor,parsl/condor/naf_lite,dask/condor,dask/slurm,dask/lpc,dask/lxplus,dask/casa} - The type of executor to use (default: futures). + --executor {iterative,futures,parsl/slurm,parsl/condor,parsl/condor/naf_lite,dask/condor,dask/condor/brux,dask/slurm,dask/lpc,dask/lxplus,dask/casa} + The type of executor to use (default: futures). Other options can be implemented. For + example see https://parsl.readthedocs.io/en/stable/userguide/configuring.html- + `parsl/slurm` - tested at DESY/Maxwell- `parsl/condor` - tested at DESY, RWTH- + `parsl/condor/naf_lite` - tested at DESY- `dask/condor/brux` - tested at BRUX (Brown U)- + `dask/slurm` - tested at DESY/Maxwell- `dask/condor` - tested at DESY, RWTH- `dask/lpc` - + custom lpc/condor setup (due to write access restrictions)- `dask/lxplus` - custom + lxplus/condor setup (due to port restrictions) -j WORKERS, --workers WORKERS Number of workers (cores/threads) to use for multi- worker executors (e.g. 
futures or condor) (default: 3) @@ -140,13 +144,13 @@ After a small test, you can run the full campaign for a dedicated phase space, s - Dileptonic ttbar phase space : check performance for btag SFs, emu channel ``` - python runner.py --workflow ttdilep_sf --json metadata/data_Winter22_emu_BTV_Run3_2022_Comm_v1.json --campaign Winter22Run3 --year 2022 --isJERC --isCorr (--executor ${scaleout_site}) + python runner.py --workflow ttdilep_sf --json metadata/data_Winter22_emu_BTV_Run3_2022_Comm_v1.json --campaign Winter22Run3 --year 2022 (--executor ${scaleout_site}) ``` - Semileptonic ttbar phase space : check performance for btag SFs, muon channel ``` -python runner.py --workflow ttsemilep_sf --json metadata/data_Winter22_mu_BTV_Run3_2022_Comm_v1.json --campaign Winter22Run3 --year 2022 --isJERC --isCorr (--executor ${scaleout_site}) +python runner.py --workflow ttsemilep_sf --json metadata/data_Winter22_mu_BTV_Run3_2022_Comm_v1.json --campaign Winter22Run3 --year 2022 (--executor ${scaleout_site}) ```
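When moving from these small tests to a full campaign, the `(--executor ${scaleout_site})` placeholder stands for one of the batch executors listed under the `--executor` option above. A possible invocation for an HTCondor site is sketched below; the executor choice and worker count are only illustrative and should be adapted to your site.

```
# illustrative scale-out of the dileptonic ttbar workflow (adjust --executor and -j to your site)
python runner.py --workflow ttdilep_sf --json metadata/data_Winter22_emu_BTV_Run3_2022_Comm_v1.json --campaign Winter22Run3 --year 2022 --executor parsl/condor -j 20
```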

@@ -159,26 +163,26 @@ python runner.py --workflow ttsemilep_sf --json metadata/data_Winter22_mu_BTV_Ru - Dileptonic ttbar phase space : check performance for charm SFs, bjets enriched SFs, muon channel ``` -python runner.py --workflow ctag_ttdilep_sf --json metadata/data_Winter22_mumu_BTV_Run3_2022_Comm_v1.json --campaign Winter22Run3 --year 2022 --isJERC --isCorr (--executor ${scaleout_site}) +python runner.py --workflow ctag_ttdilep_sf --json metadata/data_Winter22_mumu_BTV_Run3_2022_Comm_v1.json --campaign Winter22Run3 --year 2022 (--executor ${scaleout_site}) ``` - Semileptonic ttbar phase space : check performance for charm SFs, bjets enriched SFs, muon channel ``` -python runner.py --workflow ctag_ttsemilep_sf --json metadata/data_Winter22_mu_BTV_Run3_2022_Comm_v1.json --campaign Winter22Run3 --year 2022 --isJERC --isCorr (--executor ${scaleout_site}) +python runner.py --workflow ctag_ttsemilep_sf --json metadata/data_Winter22_mu_BTV_Run3_2022_Comm_v1.json --campaign Winter22Run3 --year 2022 (--executor ${scaleout_site}) ``` - W+c phase space : check performance for charm SFs, cjets enriched SFs, muon channel ``` -python runner.py --workflow ctag_Wc_sf --json metadata/data_Winter22_mu_BTV_Run3_2022_Comm_v1.json --campaign Winter22Run3 --year 2022 --isJERC --isCorr (--executor ${scaleout_site}) +python runner.py --workflow ctag_Wc_sf --json metadata/data_Winter22_mu_BTV_Run3_2022_Comm_v1.json --campaign Winter22Run3 --year 2022 (--executor ${scaleout_site}) ``` - DY phase space : check performance for charm SFs, light jets enriched SFs, muon channel ``` -python runner.py --workflow ctag_DY_sf --json metadata/data_Winter22_mumu_BTV_Run3_2022_Comm_v1.json --campaign Winter22Run3 --year 2022 --isJERC --isCorr (--executor ${scaleout_site}) +python runner.py --workflow ctag_DY_sf --json metadata/data_Winter22_mumu_BTV_Run3_2022_Comm_v1.json --campaign Winter22Run3 --year 2022 (--executor ${scaleout_site}) ```
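Note that these commands no longer use the `--isCorr`/`--isJERC` flags removed in this update: corrections are picked up automatically from the campaign. To also extract systematic variations and write flat ROOT arrays, add `--isSyst` and `--isArray`; the command below is only a sketch combining options already documented above.

```
# sketch: W+c phase space with all systematics and array output (no --isCorr/--isJERC needed)
python runner.py --workflow ctag_Wc_sf --json metadata/data_Winter22_mu_BTV_Run3_2022_Comm_v1.json --campaign Winter22Run3 --year 2022 --isSyst all --isArray (--executor ${scaleout_site})
```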

@@ -612,7 +616,7 @@ python scripts/make_template.py -i "testfile/*.coffea" --lumi 7650 -o test.root ## Notes for developers The BTV tutorial for coffea part is under `notebooks` and the template to construct new workflow is `src/BTVNanoCommissioning/workflows/example.py` -Here are some tips provided for developers working on their forked version of this repository. +Here are some tips provided for developers working on their forked version of this repository. Also some useful git commands can be found [here](https://codimd.web.cern.ch/wY3IrOBBT3i3GXIQxLMWPA) ### Setup CI pipeline for fork branch Since the CI pipelines involve reading files via `xrootd` and access gitlab.cern.ch, you need to save some secrets in your forked directory. diff --git a/runner.py b/runner.py index e792c190..b7830523 100644 --- a/runner.py +++ b/runner.py @@ -87,7 +87,6 @@ def get_main_parser(): ], help="Dataset campaign, change the corresponding correction files", ) - parser.add_argument("--isCorr", action="store_true", help="Run with SFs") parser.add_argument( "--isSyst", default=None, @@ -95,9 +94,6 @@ def get_main_parser(): choices=[None, "all", "weight_only", "JERC_split"], help="Run with systematics, all, weights_only(no JERC uncertainties included),JERC_split, None", ) - parser.add_argument( - "--isJERC", action="store_true", help="JER/JEC implemented to jet" - ) parser.add_argument("--isArray", action="store_true", help="Output root files") parser.add_argument( "--noHist", action="store_true", help="Not output coffea histogram" @@ -317,8 +313,6 @@ def get_main_parser(): args.year, args.campaign, f"{args.workflow}_{(sample_json).rstrip('.json')}", - args.isCorr, - args.isJERC, args.isSyst, args.isArray, args.noHist, diff --git a/src/BTVNanoCommissioning/workflows/BTA_producer.py b/src/BTVNanoCommissioning/workflows/BTA_producer.py index 33a99417..c31c2ea7 100644 --- a/src/BTVNanoCommissioning/workflows/BTA_producer.py +++ b/src/BTVNanoCommissioning/workflows/BTA_producer.py @@ -24,8 +24,6 @@ def __init__( year="2022", campaign="Summer22Run3", name="", - isCorr=False, - isJERC=True, isSyst=False, isArray=True, noHist=False, @@ -36,7 +34,7 @@ def __init__( self._year = year self._campaign = campaign self.chunksize = chunksize - self.isJERC = isJERC + self.SF_map = load_SF(self._campaign) # addPFMuons: if true, include the TrkInc and PFMuon collections, used by QCD based SF methods # addAllTracks: if true, include the Track collection used for JP calibration; @@ -54,7 +52,7 @@ def process(self, events): events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): shifts = JME_shifts( shifts, self.SF_map, events, self._campaign, isRealData, False, True ) diff --git a/src/BTVNanoCommissioning/workflows/BTA_ttbar_producer.py b/src/BTVNanoCommissioning/workflows/BTA_ttbar_producer.py index 2b36e8cc..44affbf2 100644 --- a/src/BTVNanoCommissioning/workflows/BTA_ttbar_producer.py +++ b/src/BTVNanoCommissioning/workflows/BTA_ttbar_producer.py @@ -18,8 +18,7 @@ def __init__( self, year="2022", campaign="Summer22Run3", - isCorr=False, - isJERC=True, + name="", isSyst=False, isArray=True, noHist=False, @@ -28,7 +27,8 @@ def __init__( self._year = year self._campaign = campaign self.chunksize = chunksize - self.isJERC = isJERC + + self.name = name self.SF_map = load_SF(self._campaign) ### Custom initialzations for BTA_ttbar workflow ### @@ -47,7 +47,7 @@ def process(self, events): events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() 
and self.isJERC: + if "JME" in self.SF_map.keys(): shifts = JME_shifts( shifts, self.SF_map, events, self._campaign, isRealData, False, True ) diff --git a/src/BTVNanoCommissioning/workflows/ctag_DY_valid_sf.py b/src/BTVNanoCommissioning/workflows/ctag_DY_valid_sf.py index 1745045a..648563db 100644 --- a/src/BTVNanoCommissioning/workflows/ctag_DY_valid_sf.py +++ b/src/BTVNanoCommissioning/workflows/ctag_DY_valid_sf.py @@ -32,8 +32,6 @@ def __init__( year="2017", campaign="Rereco17_94X", name="", - isCorr=True, - isJERC=False, isSyst=False, isArray=False, noHist=False, @@ -42,17 +40,14 @@ def __init__( self._year = year self._campaign = campaign self.name = name - self.isCorr = isCorr - self.isJERC = isJERC + self.isSyst = isSyst self.isArray = isArray self.noHist = noHist self.lumiMask = load_lumi(self._campaign) self.chunksize = chunksize - print(self._campaign) ## Load corrections - if isCorr: - self.SF_map = load_SF(self._campaign) + self.SF_map = load_SF(self._campaign) @property def accumulator(self): @@ -63,7 +58,7 @@ def process(self, events): dataset = events.metadata["dataset"] events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): syst_JERC = True if self.isSyst != None else False if self.isSyst == "JERC_split": syst_JERC = "split" @@ -120,7 +115,9 @@ def process_shift(self, events, shift_name): req_lumi = np.ones(len(events), dtype="bool") if isRealData: req_lumi = self.lumiMask(events.run, events.luminosityBlock) - output = dump_lumi(events[req_lumi], output) + # only dump for nominal case + if shift_name is None: + output = dump_lumi(events[req_lumi], output) ## HLT triggers = ["Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass8"] @@ -255,7 +252,7 @@ def process_shift(self, events, shift_name): weights.add("genweight", events[event_level].genWeight) par_flav = (sel_jet.partonFlavour == 0) & (sel_jet.hadronFlavour == 0) genflavor = sel_jet.hadronFlavour + 1 * par_flav - if self.isCorr: + if len(self.SF_map.keys()) > 0: syst_wei = True if self.isSyst != None else False if "PU" in self.SF_map.keys(): puwei( @@ -368,7 +365,7 @@ def process_shift(self, events, shift_name): ), weight=weights.partial_weight(exclude=exclude_btv), ) - if not isRealData and self.isCorr and "btag" in self.SF_map.keys(): + if not isRealData and "btag" in self.SF_map.keys(): h.fill( syst=syst, flav=genflavor, diff --git a/src/BTVNanoCommissioning/workflows/ctag_Wc_valid_sf.py b/src/BTVNanoCommissioning/workflows/ctag_Wc_valid_sf.py index de7e312a..ff0fc4ce 100644 --- a/src/BTVNanoCommissioning/workflows/ctag_Wc_valid_sf.py +++ b/src/BTVNanoCommissioning/workflows/ctag_Wc_valid_sf.py @@ -36,8 +36,6 @@ def __init__( year="2017", campaign="Rereco17_94X", name="", - isCorr=True, - isJERC=False, isSyst=False, isArray=False, noHist=False, @@ -46,16 +44,14 @@ def __init__( self._year = year self._campaign = campaign self.name = name - self.isCorr = isCorr - self.isJERC = isJERC + self.isSyst = isSyst self.isArray = isArray self.noHist = noHist self.lumiMask = load_lumi(self._campaign) self.chunksize = chunksize ## Load corrections - if isCorr: - self.SF_map = load_SF(self._campaign) + self.SF_map = load_SF(self._campaign) @property def accumulator(self): @@ -66,7 +62,7 @@ def process(self, events): dataset = events.metadata["dataset"] events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): syst_JERC = True if self.isSyst != None else False if self.isSyst == "JERC_split": syst_JERC = 
"split" @@ -122,7 +118,9 @@ def process_shift(self, events, shift_name): req_lumi = np.ones(len(events), dtype="bool") if isRealData: req_lumi = self.lumiMask(events.run, events.luminosityBlock) - output = dump_lumi(events[req_lumi], output) + # only dump for nominal case + if shift_name is None: + output = dump_lumi(events[req_lumi], output) ## HLT triggers = ["IsoMu27"] @@ -158,9 +156,6 @@ def process_shift(self, events, shift_name): iso_muindx = iso_muindx[:, 0] ## Jet cuts - jet_sel = jet_id(events, self._campaign) & ( - ak.all(events.Jet.metric_table(iso_muon) > 0.5, axis=2, mask_identity=True) - ) if "DeepJet_nsv" in events.Jet.fields: jet_sel = jet_sel & (events.Jet.DeepJet_nsv > 0) event_jet = events.Jet[jet_sel] @@ -193,6 +188,16 @@ def process_shift(self, events, shift_name): False, axis=-1, ) + soft_muon = ak.pad_none(soft_muon, 1, axis=1) + + ## Muon-jet cuts + mu_jet = event_jet[mujetsel] + otherjets = event_jet[~mujetsel] + req_mujet = ak.num(mu_jet.pt, axis=1) >= 1 + mu_jet = ak.pad_none(mu_jet, 1, axis=1) + + ## store jet index for PFCands, create mask on the jet index + jet_selpf = (jet_sel) & (mujetsel2) if "DeepJet_nsv" in events.Jet.fields: jet_selpf = jet_selpf & (events.Jet.DeepJet_nsv > 0) jetindx = ak.mask(ak.local_index(events.Jet.pt), jet_selpf == True) @@ -280,6 +285,7 @@ def process_shift(self, events, shift_name): ssmu = soft_muon[event_level] smet = MET[event_level] smuon_jet = mu_jet[event_level] + sotherjets = otherjets[event_level] nsoftmu = ak.count(ssmu.pt, axis=1) nmujet = ak.count(smuon_jet.pt, axis=1) smuon_jet = smuon_jet[:, 0] @@ -310,7 +316,7 @@ def process_shift(self, events, shift_name): smflav = smuon_jet.hadronFlavour + 1 * ( (smuon_jet.partonFlavour == 0) & (smuon_jet.hadronFlavour == 0) ) - if self.isCorr: + if len(self.SF_map.keys()) > 0: syst_wei = True if self.isSyst != None else False if "PU" in self.SF_map.keys(): puwei( @@ -442,11 +448,7 @@ def process_shift(self, events, shift_name): ), weight=weights.partial_weight(exclude=exclude_btv), ) - if ( - not isRealData - and self.isCorr - and "btag" in self.SF_map.keys() - ): + if not isRealData and "btag" in self.SF_map.keys(): h.fill( syst=syst, flav=smflav, @@ -509,10 +511,11 @@ def process_shift(self, events, shift_name): if self.isArray: # Keep the structure of events and pruned the object size pruned_ev = events[event_level] - pruned_ev.Jet = sjets - pruned_ev.Muon = shmu + pruned_ev["Jet"] = sjets + pruned_ev["Muon"] = shmu pruned_ev["MuonJet"] = smuon_jet pruned_ev["SoftMuon"] = ssmu + pruned_ev["OtherJets"] = sotherjets pruned_ev["osss"] = osss if "PFCands" in events.fields: pruned_ev.PFCands = spfcands @@ -538,7 +541,7 @@ def process_shift(self, events, shift_name): out_branch, np.where( (out_branch == "SoftMuon") - | (out_branch == "MuonJet") + # | (out_branch == "MuonJet") | (out_branch == "dilep") ), ) @@ -548,7 +551,7 @@ def process_shift(self, events, shift_name): "Muon", "Jet", "SoftMuon", - "MuonJet", + # "MuonJet", "dilep", "charge", "MET", diff --git a/src/BTVNanoCommissioning/workflows/ctag_dileptt_valid_sf.py b/src/BTVNanoCommissioning/workflows/ctag_dileptt_valid_sf.py index f3478353..584b4aae 100644 --- a/src/BTVNanoCommissioning/workflows/ctag_dileptt_valid_sf.py +++ b/src/BTVNanoCommissioning/workflows/ctag_dileptt_valid_sf.py @@ -19,6 +19,7 @@ update, uproot_writeable, _is_rootcompat, + dump_lumi, ) from BTVNanoCommissioning.helpers.update_branch import missing_branch @@ -37,8 +38,6 @@ def __init__( year="2017", campaign="Rereco17_94X", name="", - isCorr=True, - 
isJERC=False, isSyst=False, isArray=False, noHist=False, @@ -47,16 +46,14 @@ def __init__( self._year = year self._campaign = campaign self.name = name - self.isCorr = isCorr - self.isJERC = isJERC + self.isSyst = isSyst self.isArray = isArray self.noHist = noHist self.lumiMask = load_lumi(self._campaign) self.chunksize = chunksize ## Load corrections - if isCorr: - self.SF_map = load_SF(self._campaign) + self.SF_map = load_SF(self._campaign) @property def accumulator(self): @@ -67,7 +64,7 @@ def process(self, events): dataset = events.metadata["dataset"] events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): syst_JERC = True if self.isSyst != None else False if self.isSyst == "JERC_split": syst_JERC = "split" @@ -124,7 +121,9 @@ def process_shift(self, events, shift_name): req_lumi = np.ones(len(events), dtype="bool") if isRealData: req_lumi = self.lumiMask(events.run, events.luminosityBlock) - output = dump_lumi(events[req_lumi], output) + # only dump for nominal case + if shift_name is None: + output = dump_lumi(events[req_lumi], output) ## HLT triggers = ["Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass8"] @@ -286,7 +285,7 @@ def process_shift(self, events, shift_name): genflavor = sjets.hadronFlavour + 1 * par_flav smpu = (smuon_jet.partonFlavour == 0) & (smuon_jet.hadronFlavour == 0) smflav = 1 * smpu + smuon_jet.hadronFlavour - if self.isCorr: + if len(self.SF_map.keys()) > 0: syst_wei = True if self.isSyst != None else False if "PU" in self.SF_map.keys(): puwei( @@ -414,11 +413,7 @@ def process_shift(self, events, shift_name): ), weight=weights.partial_weight(exclude=exclude_btv), ) - if ( - not isRealData - and self.isCorr - and "btag" in self.SF_map.keys() - ): + if not isRealData and "btag" in self.SF_map.keys(): h.fill( syst=syst, flav=smflav, diff --git a/src/BTVNanoCommissioning/workflows/ctag_eDY_valid_sf.py b/src/BTVNanoCommissioning/workflows/ctag_eDY_valid_sf.py index dd461f62..4735f221 100644 --- a/src/BTVNanoCommissioning/workflows/ctag_eDY_valid_sf.py +++ b/src/BTVNanoCommissioning/workflows/ctag_eDY_valid_sf.py @@ -32,8 +32,6 @@ def __init__( year="2017", campaign="Rereco17_94X", name="", - isCorr=True, - isJERC=False, isSyst=False, isArray=False, noHist=False, @@ -42,16 +40,14 @@ def __init__( self._year = year self._campaign = campaign self.name = name - self.isCorr = isCorr - self.isJERC = isJERC + self.isSyst = isSyst self.isArray = isArray self.noHist = noHist self.lumiMask = load_lumi(self._campaign) self.chunksize = chunksize ## Load corrections - if isCorr: - self.SF_map = load_SF(self._campaign) + self.SF_map = load_SF(self._campaign) @property def accumulator(self): @@ -62,7 +58,7 @@ def process(self, events): dataset = events.metadata["dataset"] events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): syst_JERC = True if self.isSyst != None else False if self.isSyst == "JERC_split": syst_JERC = "split" @@ -118,7 +114,9 @@ def process_shift(self, events, shift_name): req_lumi = np.ones(len(events), dtype="bool") if isRealData: req_lumi = self.lumiMask(events.run, events.luminosityBlock) - output = dump_lumi(events[req_lumi], output) + # only dump for nominal case + if shift_name is None: + output = dump_lumi(events[req_lumi], output) ## HLT triggers = ["Ele23_Ele12_CaloIdL_TrackIdL_IsoVL"] @@ -256,7 +254,7 @@ def process_shift(self, events, shift_name): weights.add("genweight", events[event_level].genWeight) par_flav = 
(sel_jet.partonFlavour == 0) & (sel_jet.hadronFlavour == 0) genflavor = sel_jet.hadronFlavour + 1 * par_flav - if self.isCorr: + if len(self.SF_map.keys()) > 0: syst_wei = True if self.isSyst != None else False if "PU" in self.SF_map.keys(): puwei( @@ -369,7 +367,7 @@ def process_shift(self, events, shift_name): ), weight=weights.partial_weight(exclude=exclude_btv), ) - if not isRealData and self.isCorr and "btag" in self.SF_map.keys(): + if not isRealData and "btag" in self.SF_map.keys(): h.fill( syst=syst, flav=genflavor, diff --git a/src/BTVNanoCommissioning/workflows/ctag_eWc_valid_sf.py b/src/BTVNanoCommissioning/workflows/ctag_eWc_valid_sf.py index 01ad1214..d0beb01a 100644 --- a/src/BTVNanoCommissioning/workflows/ctag_eWc_valid_sf.py +++ b/src/BTVNanoCommissioning/workflows/ctag_eWc_valid_sf.py @@ -38,8 +38,6 @@ def __init__( year="2017", campaign="Rereco17_94X", name="", - isCorr=True, - isJERC=False, isSyst=False, isArray=False, noHist=False, @@ -48,16 +46,14 @@ def __init__( self._year = year self._campaign = campaign self.name = name - self.isCorr = isCorr - self.isJERC = isJERC + self.isSyst = isSyst self.isArray = isArray self.noHist = noHist self.lumiMask = load_lumi(self._campaign) self.chunksize = chunksize ## Load corrections - if isCorr: - self.SF_map = load_SF(self._campaign) + self.SF_map = load_SF(self._campaign) @property def accumulator(self): @@ -68,7 +64,7 @@ def process(self, events): dataset = events.metadata["dataset"] events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): syst_JERC = True if self.isSyst != None else False if self.isSyst == "JERC_split": syst_JERC = "split" @@ -124,7 +120,9 @@ def process_shift(self, events, shift_name): req_lumi = np.ones(len(events), dtype="bool") if isRealData: req_lumi = self.lumiMask(events.run, events.luminosityBlock) - output = dump_lumi(events[req_lumi], output) + # only dump for nominal case + if shift_name is None: + output = dump_lumi(events[req_lumi], output) ## HLT triggers = ["Ele32_WPTight_Gsf_L1DoubleEG"] @@ -161,9 +159,7 @@ def process_shift(self, events, shift_name): iso_eindx = iso_eindx[:, 0] ## Jet cuts - jet_sel = jet_id(events, self._campaign) & ( - ak.all(events.Jet.metric_table(iso_ele) > 0.5, axis=2, mask_identity=True) - ) + if "DeepJet_nsv" in events.Jet.fields: jet_sel = jet_sel & (events.Jet.DeepJet_nsv > 0) event_jet = events.Jet[jet_sel] @@ -189,37 +185,18 @@ def process_shift(self, events, shift_name): False, axis=-1, ) + + ## Muon-jet cuts soft_muon = ak.pad_none(soft_muon, 1, axis=1) ## Muon-jet cuts - mu_jet = event_jet[ - ( - ak.all( - event_jet.metric_table(soft_muon) <= 0.4, axis=2, mask_identity=True - ) - ) - & ((event_jet.muonIdx1 != -1) | (event_jet.muonIdx2 != -1)) - ] + mu_jet = event_jet[mujetsel] + otherjets = event_jet[~mujetsel] req_mujet = ak.num(mu_jet.pt, axis=1) >= 1 mu_jet = ak.pad_none(mu_jet, 1, axis=1) ## store jet index for PFCands, create mask on the jet index - jet_selpf = ( - jet_id(events, self._campaign) - & ( - ak.all( - events.Jet.metric_table(iso_ele) > 0.5, axis=2, mask_identity=True - ) - ) - & ( - ak.all( - events.Jet.metric_table(soft_muon) <= 0.4, - axis=2, - mask_identity=True, - ) - ) - & ((events.Jet.muonIdx1 != -1) | (events.Jet.muonIdx2 != -1)) - ) + jet_selpf = (jet_sel) & (mujetsel2) if "DeepJet_nsv" in events.Jet.fields: jet_selpf = jet_selpf & (events.Jet.DeepJet_nsv > 0) jetindx = ak.mask(ak.local_index(events.Jet.pt), jet_selpf == True) @@ -301,6 +278,7 @@ def 
process_shift(self, events, shift_name): ssmu = soft_muon[event_level] smet = MET[event_level] smuon_jet = mu_jet[event_level] + sotherjets = otherjets[event_level] nsoftmu = ak.count(ssmu.pt, axis=1) nmujet = ak.count(smuon_jet.pt, axis=1) smuon_jet = smuon_jet[:, 0] @@ -331,7 +309,7 @@ def process_shift(self, events, shift_name): smflav = smuon_jet.hadronFlavour + 1 * ( (smuon_jet.partonFlavour == 0) & (smuon_jet.hadronFlavour == 0) ) - if self.isCorr: + if len(self.SF_map.keys()) > 0: syst_wei = True if self.isSyst != None else False if "PU" in self.SF_map.keys(): puwei( @@ -463,11 +441,7 @@ def process_shift(self, events, shift_name): ), weight=weights.partial_weight(exclude=exclude_btv), ) - if ( - not isRealData - and self.isCorr - and "btag" in self.SF_map.keys() - ): + if not isRealData and "btag" in self.SF_map.keys(): h.fill( syst=syst, flav=smflav, @@ -530,9 +504,10 @@ def process_shift(self, events, shift_name): if self.isArray: # Keep the structure of events and pruned the object size pruned_ev = events[event_level] - pruned_ev.Jet = sjets - pruned_ev.Muon = shmu + pruned_ev["Jet"] = sjets + pruned_ev["Muon"] = shmu pruned_ev["MuonJet"] = smuon_jet + pruned_ev["OtherJets"] = sotherjets pruned_ev["SoftMuon"] = ssmu pruned_ev["osss"] = osss if "PFCands" in events.fields: @@ -559,7 +534,7 @@ def process_shift(self, events, shift_name): out_branch, np.where( (out_branch == "SoftMuon") - | (out_branch == "MuonJet") + # | (out_branch == "MuonJet") | (out_branch == "dilep") ), ) @@ -569,7 +544,7 @@ def process_shift(self, events, shift_name): "Muon", "Jet", "SoftMuon", - "MuonJet", + # "MuonJet", "dilep", "charge", "MET", diff --git a/src/BTVNanoCommissioning/workflows/ctag_edileptt_valid_sf.py b/src/BTVNanoCommissioning/workflows/ctag_edileptt_valid_sf.py index adbcb971..f0b458ee 100644 --- a/src/BTVNanoCommissioning/workflows/ctag_edileptt_valid_sf.py +++ b/src/BTVNanoCommissioning/workflows/ctag_edileptt_valid_sf.py @@ -37,8 +37,6 @@ def __init__( year="2017", campaign="Rereco17_94X", name="", - isCorr=True, - isJERC=False, isSyst=False, isArray=False, noHist=False, @@ -47,16 +45,14 @@ def __init__( self._year = year self._campaign = campaign self.name = name - self.isCorr = isCorr - self.isJERC = isJERC + self.isSyst = isSyst self.isArray = isArray self.noHist = noHist self.lumiMask = load_lumi(self._campaign) self.chunksize = chunksize ## Load corrections - if isCorr: - self.SF_map = load_SF(self._campaign) + self.SF_map = load_SF(self._campaign) @property def accumulator(self): @@ -67,7 +63,7 @@ def process(self, events): dataset = events.metadata["dataset"] events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): syst_JERC = True if self.isSyst != None else False if self.isSyst == "JERC_split": syst_JERC = "split" @@ -122,7 +118,9 @@ def process_shift(self, events, shift_name): req_lumi = np.ones(len(events), dtype="bool") if isRealData: req_lumi = self.lumiMask(events.run, events.luminosityBlock) - output = dump_lumi(events[req_lumi], output) + # only dump for nominal case + if shift_name is None: + output = dump_lumi(events[req_lumi], output) ## HLT triggers = ["Ele23_Ele12_CaloIdL_TrackIdL_IsoVL"] @@ -284,7 +282,7 @@ def process_shift(self, events, shift_name): genflavor = sjets.hadronFlavour + 1 * par_flav smpu = (smuon_jet.partonFlavour == 0) & (smuon_jet.hadronFlavour == 0) smflav = 1 * smpu + smuon_jet.hadronFlavour - if self.isCorr: + if len(self.SF_map.keys()) > 0: syst_wei = True if self.isSyst != 
None else False if "PU" in self.SF_map.keys(): puwei( @@ -411,11 +409,7 @@ def process_shift(self, events, shift_name): ), weight=weights.partial_weight(exclude=exclude_btv), ) - if ( - not isRealData - and self.isCorr - and "btag" in self.SF_map.keys() - ): + if not isRealData and "btag" in self.SF_map.keys(): h.fill( syst=syst, flav=smflav, diff --git a/src/BTVNanoCommissioning/workflows/ctag_emdileptt_valid_sf.py b/src/BTVNanoCommissioning/workflows/ctag_emdileptt_valid_sf.py index f8187056..629290cb 100644 --- a/src/BTVNanoCommissioning/workflows/ctag_emdileptt_valid_sf.py +++ b/src/BTVNanoCommissioning/workflows/ctag_emdileptt_valid_sf.py @@ -37,8 +37,6 @@ def __init__( year="2017", campaign="Rereco17_94X", name="", - isCorr=True, - isJERC=False, isSyst=False, isArray=False, noHist=False, @@ -47,16 +45,14 @@ def __init__( self._year = year self._campaign = campaign self.name = name - self.isCorr = isCorr - self.isJERC = isJERC + self.isSyst = isSyst self.isArray = isArray self.noHist = noHist self.lumiMask = load_lumi(self._campaign) self.chunksize = chunksize ## Load corrections - if isCorr: - self.SF_map = load_SF(self._campaign) + self.SF_map = load_SF(self._campaign) @property def accumulator(self): @@ -67,7 +63,7 @@ def process(self, events): dataset = events.metadata["dataset"] events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): syst_JERC = True if self.isSyst != None else False if self.isSyst == "JERC_split": syst_JERC = "split" @@ -123,7 +119,9 @@ def process_shift(self, events, shift_name): req_lumi = np.ones(len(events), dtype="bool") if isRealData: req_lumi = self.lumiMask(events.run, events.luminosityBlock) - output = dump_lumi(events[req_lumi], output) + # only dump for nominal case + if shift_name is None: + output = dump_lumi(events[req_lumi], output) ## HLT trigger_he = [ @@ -329,7 +327,7 @@ def process_shift(self, events, shift_name): genflavor = sjets.hadronFlavour + 1 * par_flav smpu = (smuon_jet.partonFlavour == 0) & (smuon_jet.hadronFlavour == 0) smflav = 1 * smpu + smuon_jet.hadronFlavour - if self.isCorr: + if len(self.SF_map.keys()) > 0: syst_wei = True if self.isSyst != None else False if "PU" in self.SF_map.keys(): puwei( @@ -465,11 +463,7 @@ def process_shift(self, events, shift_name): ), weight=weights.partial_weight(exclude=exclude_btv), ) - if ( - not isRealData - and self.isCorr - and "btag" in self.SF_map.keys() - ): + if not isRealData and "btag" in self.SF_map.keys(): h.fill( syst=syst, flav=smflav, diff --git a/src/BTVNanoCommissioning/workflows/ctag_ettsemilep_valid_sf.py b/src/BTVNanoCommissioning/workflows/ctag_ettsemilep_valid_sf.py index 0fb46ccf..b432cc5e 100644 --- a/src/BTVNanoCommissioning/workflows/ctag_ettsemilep_valid_sf.py +++ b/src/BTVNanoCommissioning/workflows/ctag_ettsemilep_valid_sf.py @@ -39,8 +39,6 @@ def __init__( year="2017", campaign="Rereco17_94X", name="", - isCorr=True, - isJERC=False, isSyst=False, isArray=False, noHist=False, @@ -49,16 +47,14 @@ def __init__( self._year = year self._campaign = campaign self.name = name - self.isCorr = isCorr - self.isJERC = isJERC + self.isSyst = isSyst self.isArray = isArray self.noHist = noHist self.lumiMask = load_lumi(self._campaign) self.chunksize = chunksize ## Load corrections - if isCorr: - self.SF_map = load_SF(self._campaign) + self.SF_map = load_SF(self._campaign) @property def accumulator(self): @@ -69,7 +65,7 @@ def process(self, events): dataset = events.metadata["dataset"] events = 
missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): syst_JERC = True if self.isSyst != None else False if self.isSyst == "JERC_split": syst_JERC = "split" @@ -127,7 +123,9 @@ def process_shift(self, events, shift_name): req_lumi = np.ones(len(events), dtype="bool") if isRealData: req_lumi = self.lumiMask(events.run, events.luminosityBlock) - output = dump_lumi(events[req_lumi], output) + # only dump for nominal case + if shift_name is None: + output = dump_lumi(events[req_lumi], output) ## HLT triggers = ["Ele32_WPTight_Gsf_L1DoubleEG"] @@ -326,7 +324,7 @@ def process_shift(self, events, shift_name): 1 * ((smuon_jet.partonFlavour == 0) & (smuon_jet.hadronFlavour == 0)) + smuon_jet.hadronFlavour ) - if self.isCorr: + if len(self.SF_map.keys()) > 0: syst_wei = True if self.isSyst != None else False if "PU" in self.SF_map.keys(): puwei( @@ -455,11 +453,7 @@ def process_shift(self, events, shift_name): ), weight=weights.partial_weight(exclude=exclude_btv), ) - if ( - not isRealData - and self.isCorr - and "btag" in self.SF_map.keys() - ): + if not isRealData and "btag" in self.SF_map.keys(): h.fill( syst=syst, flav=smflav, diff --git a/src/BTVNanoCommissioning/workflows/ctag_semileptt_valid_sf.py b/src/BTVNanoCommissioning/workflows/ctag_semileptt_valid_sf.py index f2ddea86..67dfa541 100644 --- a/src/BTVNanoCommissioning/workflows/ctag_semileptt_valid_sf.py +++ b/src/BTVNanoCommissioning/workflows/ctag_semileptt_valid_sf.py @@ -38,8 +38,6 @@ def __init__( year="2017", campaign="Rereco17_94X", name="", - isCorr=True, - isJERC=False, isSyst=False, isArray=False, noHist=False, @@ -48,16 +46,14 @@ def __init__( self._year = year self._campaign = campaign self.name = name - self.isCorr = isCorr - self.isJERC = isJERC + self.isSyst = isSyst self.isArray = isArray self.noHist = noHist self.lumiMask = load_lumi(self._campaign) self.chunksize = chunksize ## Load corrections - if isCorr: - self.SF_map = load_SF(self._campaign) + self.SF_map = load_SF(self._campaign) @property def accumulator(self): @@ -68,7 +64,7 @@ def process(self, events): dataset = events.metadata["dataset"] events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): syst_JERC = True if self.isSyst != None else False if self.isSyst == "JERC_split": syst_JERC = "split" @@ -125,7 +121,9 @@ def process_shift(self, events, shift_name): req_lumi = np.ones(len(events), dtype="bool") if isRealData: req_lumi = self.lumiMask(events.run, events.luminosityBlock) - output = dump_lumi(events[req_lumi], output) + # only dump for nominal case + if shift_name is None: + output = dump_lumi(events[req_lumi], output) ## HLT triggers = ["IsoMu24", "IsoMu27"] @@ -342,7 +340,7 @@ def process_shift(self, events, shift_name): smflav = smuon_jet.hadronFlavour + 1 * ( (smuon_jet.partonFlavour == 0) & (smuon_jet.hadronFlavour == 0) ) - if self.isCorr: + if len(self.SF_map.keys()) > 0: syst_wei = True if self.isSyst != None else False if "PU" in self.SF_map.keys(): puwei( @@ -471,11 +469,7 @@ def process_shift(self, events, shift_name): ), weight=weights.partial_weight(exclude=exclude_btv), ) - if ( - not isRealData - and self.isCorr - and "btag" in self.SF_map.keys() - ): + if not isRealData and "btag" in self.SF_map.keys(): h.fill( syst=syst, flav=smflav, diff --git a/src/BTVNanoCommissioning/workflows/example.py b/src/BTVNanoCommissioning/workflows/example.py index e73478bb..012333b0 100644 --- 
a/src/BTVNanoCommissioning/workflows/example.py +++ b/src/BTVNanoCommissioning/workflows/example.py @@ -36,8 +36,6 @@ def __init__( year="2017", campaign="Rereco17_94X", name="", - isCorr=True, - isJERC=False, isSyst=False, isArray=False, noHist=False, @@ -46,16 +44,14 @@ def __init__( self._year = year self._campaign = campaign self.name = name - self.isCorr = isCorr - self.isJERC = isJERC + self.isSyst = isSyst self.isArray = isArray self.noHist = noHist self.lumiMask = load_lumi(self._campaign) self.chunksize = chunksize ## Load corrections - if isCorr: - self.SF_map = load_SF(self._campaign) + self.SF_map = load_SF(self._campaign) @property def accumulator(self): @@ -67,7 +63,7 @@ def process(self, events): dataset = events.metadata["dataset"] events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): syst_JERC = True if self.isSyst != None else False if self.isSyst == "JERC_split": syst_JERC = "split" # JEC splitted into 11 sources instead of JES_total @@ -126,7 +122,9 @@ def process_shift(self, events, shift_name): req_lumi = np.ones(len(events), dtype="bool") if isRealData: req_lumi = self.lumiMask(events.run, events.luminosityBlock) - output = dump_lumi(events[req_lumi], output) + # only dump for nominal case + if shift_name is None: + output = dump_lumi(events[req_lumi], output) ## HLT triggers = [ @@ -182,7 +180,7 @@ def process_shift(self, events, shift_name): par_flav = (sjets.partonFlavour == 0) & (sjets.hadronFlavour == 0) genflavor = sjets.hadronFlavour + 1 * par_flav # Load SFs - if self.isCorr: + if len(self.SF_map.keys()) > 0: syst_wei = ( True if self.isSyst != None else False ) # load systematic flag diff --git a/src/BTVNanoCommissioning/workflows/ttbar_validation.py b/src/BTVNanoCommissioning/workflows/ttbar_validation.py index 000d2486..2e892f18 100644 --- a/src/BTVNanoCommissioning/workflows/ttbar_validation.py +++ b/src/BTVNanoCommissioning/workflows/ttbar_validation.py @@ -39,8 +39,6 @@ def __init__( year="2017", campaign="Rereco17_94X", name="", - isCorr=True, - isJERC=False, isSyst=False, isArray=False, noHist=False, @@ -49,16 +47,14 @@ def __init__( self._year = year self._campaign = campaign self.name = name - self.isCorr = isCorr - self.isJERC = isJERC + self.isSyst = isSyst self.isArray = isArray self.noHist = noHist self.lumiMask = load_lumi(self._campaign) self.chunksize = chunksize ## Load corrections - if isCorr: - self.SF_map = load_SF(self._campaign) + self.SF_map = load_SF(self._campaign) @property def accumulator(self): @@ -69,7 +65,7 @@ def process(self, events): dataset = events.metadata["dataset"] events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): syst_JERC = True if self.isSyst != None else False if self.isSyst == "JERC_split": syst_JERC = "split" @@ -123,7 +119,9 @@ def process_shift(self, events, shift_name): req_lumi = np.ones(len(events), dtype="bool") if isRealData: req_lumi = self.lumiMask(events.run, events.luminosityBlock) - output = dump_lumi(events[req_lumi], output) + # only dump for nominal case + if shift_name is None: + output = dump_lumi(events[req_lumi], output) ## HLT triggers = [ @@ -189,7 +187,7 @@ def process_shift(self, events, shift_name): par_flav = (sjets.partonFlavour == 0) & (sjets.hadronFlavour == 0) genflavor = sjets.hadronFlavour + 1 * par_flav genweiev = ak.flatten(ak.broadcast_arrays(weights.weight(), sjets["pt"])[0]) - if self.isCorr: + if len(self.SF_map.keys()) > 0: 
syst_wei = True if self.isSyst != None else False if "PU" in self.SF_map.keys(): puwei( diff --git a/src/BTVNanoCommissioning/workflows/ttdilep_valid_sf.py b/src/BTVNanoCommissioning/workflows/ttdilep_valid_sf.py index e0d0f892..ae2b00bb 100644 --- a/src/BTVNanoCommissioning/workflows/ttdilep_valid_sf.py +++ b/src/BTVNanoCommissioning/workflows/ttdilep_valid_sf.py @@ -36,8 +36,6 @@ def __init__( year="2017", campaign="Rereco17_94X", name="", - isCorr=True, - isJERC=False, isSyst=False, isArray=False, noHist=False, @@ -46,16 +44,14 @@ def __init__( self._year = year self._campaign = campaign self.name = name - self.isCorr = isCorr - self.isJERC = isJERC + self.isSyst = isSyst self.isArray = isArray self.noHist = noHist self.lumiMask = load_lumi(self._campaign) self.chunksize = chunksize ## Load corrections - if isCorr: - self.SF_map = load_SF(self._campaign) + self.SF_map = load_SF(self._campaign) @property def accumulator(self): @@ -67,7 +63,7 @@ def process(self, events): dataset = events.metadata["dataset"] events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): syst_JERC = True if self.isSyst != None else False if self.isSyst == "JERC_split": syst_JERC = "split" @@ -124,7 +120,9 @@ def process_shift(self, events, shift_name): req_lumi = np.ones(len(events), dtype="bool") if isRealData: req_lumi = self.lumiMask(events.run, events.luminosityBlock) - output = dump_lumi(events[req_lumi], output) + # only dump for nominal case + if shift_name is None: + output = dump_lumi(events[req_lumi], output) ## HLT triggers = [ @@ -259,7 +257,7 @@ def process_shift(self, events, shift_name): weights.add("genweight", events[event_level].genWeight) par_flav = (sjets.partonFlavour == 0) & (sjets.hadronFlavour == 0) genflavor = sjets.hadronFlavour + 1 * par_flav - if self.isCorr: + if len(self.SF_map.keys()) > 0: syst_wei = True if self.isSyst != None else False if "PU" in self.SF_map.keys(): puwei( @@ -366,11 +364,7 @@ def process_shift(self, events, shift_name): ), weight=weights.partial_weight(exclude=exclude_btv), ) - if ( - not isRealData - and self.isCorr - and "btag" in self.SF_map.keys() - ): + if not isRealData and "btag" in self.SF_map.keys(): h.fill( syst=syst, flav=flatten(genflavor[:, i]), diff --git a/src/BTVNanoCommissioning/workflows/ttsemilep_valid_sf.py b/src/BTVNanoCommissioning/workflows/ttsemilep_valid_sf.py index d80cc667..7489d552 100644 --- a/src/BTVNanoCommissioning/workflows/ttsemilep_valid_sf.py +++ b/src/BTVNanoCommissioning/workflows/ttsemilep_valid_sf.py @@ -35,8 +35,6 @@ def __init__( year="2017", campaign="Rereco17_94X", name="", - isCorr=True, - isJERC=False, isSyst=False, isArray=False, noHist=False, @@ -45,16 +43,14 @@ def __init__( self._year = year self._campaign = campaign self.name = name - self.isCorr = isCorr - self.isJERC = isJERC + self.isSyst = isSyst self.isArray = isArray self.noHist = noHist self.lumiMask = load_lumi(self._campaign) self.chunksize = chunksize ## Load corrections - if isCorr: - self.SF_map = load_SF(self._campaign) + self.SF_map = load_SF(self._campaign) @property def accumulator(self): @@ -65,7 +61,7 @@ def process(self, events): dataset = events.metadata["dataset"] events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): syst_JERC = True if self.isSyst != None else False if self.isSyst == "JERC_split": syst_JERC = "split" @@ -121,7 +117,9 @@ def process_shift(self, events, shift_name): req_lumi = 
np.ones(len(events), dtype="bool") if isRealData: req_lumi = self.lumiMask(events.run, events.luminosityBlock) - output = dump_lumi(events[req_lumi], output) + # only dump for nominal case + if shift_name is None: + output = dump_lumi(events[req_lumi], output) ## HLT triggers = ["IsoMu24"] @@ -240,7 +238,7 @@ def process_shift(self, events, shift_name): weights.add("genweight", events[event_level].genWeight) par_flav = (sjets.partonFlavour == 0) & (sjets.hadronFlavour == 0) genflavor = sjets.hadronFlavour + 1 * par_flav - if self.isCorr: + if len(self.SF_map.keys()) > 0: syst_wei = True if self.isSyst != None else False if "PU" in self.SF_map.keys(): puwei( @@ -341,7 +339,6 @@ def process_shift(self, events, shift_name): ) if ( not isRealData - and self.isCorr and "btag" in self.SF_map.keys() and "_b" not in histname and "_bb" not in histname diff --git a/src/BTVNanoCommissioning/workflows/validation.py b/src/BTVNanoCommissioning/workflows/validation.py index 4542c5df..5e076fbe 100644 --- a/src/BTVNanoCommissioning/workflows/validation.py +++ b/src/BTVNanoCommissioning/workflows/validation.py @@ -36,8 +36,6 @@ def __init__( self, year="2017", campaign="Rereco17_94X", - isCorr=True, - isJERC=False, isSyst=False, isArray=False, noHist=False, @@ -45,14 +43,12 @@ def __init__( ): self._year = year self._campaign = campaign - self.isCorr = isCorr - self.isJERC = isJERC + self.isSyst = isSyst self.lumiMask = load_lumi(self._campaign) ## Load corrections - if isCorr: - self.SF_map = load_SF(self._campaign) + self.SF_map = load_SF(self._campaign) @property def accumulator(self): @@ -63,7 +59,7 @@ def process(self, events): dataset = events.metadata["dataset"] events = missing_branch(events) shifts = [] - if "JME" in self.SF_map.keys() and self.isJERC: + if "JME" in self.SF_map.keys(): syst_JERC = True if self.isSyst != None else False if self.isSyst == "JERC_split": syst_JERC = "split" @@ -120,7 +116,9 @@ def process_shift(self, events, shift_name): req_lumi = np.ones(len(events), dtype="bool") if isRealData: req_lumi = self.lumiMask(events.run, events.luminosityBlock) - output = dump_lumi(events[req_lumi], output) + # only dump for nominal case + if shift_name is None: + output = dump_lumi(events[req_lumi], output) ## HLT triggers = ["IsoMu24"] @@ -197,7 +195,7 @@ def process_shift(self, events, shift_name): genweiev = ak.flatten( ak.broadcast_arrays(events[event_level].genWeight, sjets["pt"])[0] ) - if self.isCorr: + if len(self.SF_map.keys()) > 0: syst_wei = True if self.isSyst != None else False if "PU" in self.SF_map.keys(): puwei(