diff --git a/scripts/fetch.py b/scripts/fetch.py
index 44a1bfb2..7cd9b270 100644
--- a/scripts/fetch.py
+++ b/scripts/fetch.py
@@ -197,7 +197,7 @@ def getFilesFromDas(args):
             .read()
             .split("\n")
         )
-
+        print("Number of files: ", len(flist))
         import json
 
         dataset = dataset[:-1] if "\n" in dataset else dataset
diff --git a/scripts/suball.py b/scripts/suball.py
index 7276a6cf..ad8b10ad 100644
--- a/scripts/suball.py
+++ b/scripts/suball.py
@@ -103,8 +103,6 @@ def get_lumi_from_web(year):
         # "QCD_mu_sf"
     ],
 }
-if args.debug:
-    args.local = True
 if args.scheme in workflows.keys():
     workflow_group["test"] = [args.scheme]
     args.scheme = "test"
@@ -127,7 +125,7 @@ def get_lumi_from_web(year):
 
 for wf in workflow_group[args.scheme]:
     if args.debug:
-        print(f"Start running {wf} workflow!!!")
+        print(f"======{wf} in {args.scheme}=====")
     overwrite = "--overwrite" if args.overwrite else ""
     ## creating dataset
     if (
@@ -136,13 +134,24 @@ def get_lumi_from_web(year):
         )
         or args.overwrite
     ):
+        if args.debug:
+            print(
+                f"Creating MC dataset: python scripts/fetch.py -c {args.campaign} --from_workflow {wf} --DAS_campaign {args.DAS_campaign} --year {args.year} {overwrite} --skipvalidation"
+            )
         os.system(
             f"python scripts/fetch.py -c {args.campaign} --from_workflow {wf} --DAS_campaign {args.DAS_campaign} --year {args.year} {overwrite} --skipvalidation"
         )
+        if args.debug:
+            os.system(f"ls metadata/{args.campaign}/*.json")  # show the generated metadata files
+
     ## Run the workflows
     for types in predefined_sample[wf].keys():
+        if (types != "data" and types != "MC") and args.scheme == "Validation":
+            continue
+        print(
+            f"hists_{wf}_{types}_{args.campaign}_{args.year}_{wf}/hists_{wf}_{types}_{args.campaign}_{args.year}_{wf}.coffea"
+        )
         if (
             not os.path.exists(
                 f"hists_{wf}_{types}_{args.campaign}_{args.year}_{wf}/hists_{wf}_{types}_{args.campaign}_{args.year}_{wf}.coffea"
@@ -181,12 +190,12 @@ def get_lumi_from_web(year):
                     and "limit" not in key
                 ):
                     runner_config += " --limit 50"
-                elif args.debug:
-                    runner_config += " --limit 1 --executor iterative"
+                else:
                     runner_config += f" --{key}={value}"
 
             runner_config = runner_config_required + runner_config
-            print(runner_config)
+            if args.debug:
+                print(f"run the workflow: {runner_config}")
             with open(
                 f"config_{args.year}_{args.campaign}_{args.scheme}_{args.version}.txt",
                 "w",
@@ -194,7 +203,8 @@ def get_lumi_from_web(year):
                 config_list.write(runner_config)
 
             os.system(runner_config)
-
+            if args.debug:
+                print(f"workflow is finished for {wf}!")
     # Get luminosity
     if (
         os.path.exists(
@@ -202,10 +212,14 @@ def get_lumi_from_web(year):
         )
         or args.overwrite
    ):
+        if args.debug:
+            print(
+                f"Get the luminosity from hists_{wf}_data_{args.campaign}_{args.year}_{wf}/hists_{wf}_data_{args.campaign}_{args.year}_{wf}.coffea"
+            )
         lumi = os.popen(
             f"python scripts/dump_processed.py -t all -c hists_{wf}_data_{args.campaign}_{args.year}_{wf}/hists_{wf}_data_{args.campaign}_{args.year}_{wf}.coffea --json metadata/{args.campaign}/data_{args.campaign}_{args.year}_{wf}.json -n {args.campaign}_{args.year}_{wf}"
         ).read()
-
+        print(lumi)
         lumi = int(
             round(
                 float(
@@ -220,14 +234,20 @@ def get_lumi_from_web(year):
         )
         if os.path.exists(
             f"hists_{wf}_MC_{args.campaign}_{args.year}_{wf}/hists_{wf}_MC_{args.campaign}_{args.year}_{wf}.coffea"
+        ) and os.path.exists(
+            f"hists_{wf}_data_{args.campaign}_{args.year}_{wf}/hists_{wf}_data_{args.campaign}_{args.year}_{wf}.coffea"
         ):
             print(lumi)
+            if args.debug:
+                print(f"Plot the dataMC for {wf}")
             os.system(
"hists_{wf}_*_{args.campaign}_{args.year}_{wf}/hists_{wf}_*_{args.campaign}_{args.year}_{wf}.coffea" --lumi {lumi} -p {wf} -v all --ext {args.campaign}_{args.year}{args.version}' ) ## Inspired from Uttiya, create remote directory # https://github.com/cms-btv-pog/BTVNanoCommissioning/blob/14e654feeb4b4d738ee43ab913efb343ea65fd1d/scripts/submit/createremotedir.sh # create remote direcotry + if args.debug: + print(f"Upload plots&coffea to eos: {wf}") if not args.local: os.system(f"mkdir -p {args.campaign}{args.version}/{wf}") os.system(f"cp scripts/index.php {args.campaign}{args.version}/.") diff --git a/src/BTVNanoCommissioning/helpers/update_branch.py b/src/BTVNanoCommissioning/helpers/update_branch.py index ba17d19a..46ef598e 100644 --- a/src/BTVNanoCommissioning/helpers/update_branch.py +++ b/src/BTVNanoCommissioning/helpers/update_branch.py @@ -86,7 +86,9 @@ def missing_branch(events): events.Jet, {"btagPNetCvNotB": jets.btagPNetCvNotB}, ) - if not hasattr(events.Jet, "btagRobustParTAK4CvNotB"): + if not hasattr(events.Jet, "btagRobustParTAK4CvNotB") and hasattr( + events.Jet, "btagRobustParTAK4B" + ): jets = events.Jet jets["btagRobustParTAK4CvNotB"] = ( jets.btagRobustParTAK4CvB diff --git a/src/BTVNanoCommissioning/utils/AK4_parameters.py b/src/BTVNanoCommissioning/utils/AK4_parameters.py index 64350bbb..e02791cb 100644 --- a/src/BTVNanoCommissioning/utils/AK4_parameters.py +++ b/src/BTVNanoCommissioning/utils/AK4_parameters.py @@ -142,5 +142,5 @@ "MC": "calibeHistoWrite_MC2023_Summer23BPix.root", }, }, - "prompt_dataMC": {"lumiMask": "$PROMPT_DATAMC"}, + "prompt_dataMC": {"lumiMask": "Cert_Collisions2024_378981_386951_Golden.json"}, }