diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 00000000..9846a80c --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,29 @@ + +# This file is a template, and might need editing before it works on your project. +# This is a sample GitLab CI/CD configuration file that should run without any modifications. +# It demonstrates a basic 3 stage CI/CD pipeline. Instead of real tests or scripts, +# it uses echo commands to simulate the pipeline execution. +# +# A pipeline is composed of independent jobs that run scripts, grouped into stages. +# Stages run in sequential order, but jobs within stages run in parallel. +# +# For more information, see: https://docs.gitlab.com/ee/ci/yaml/index.html#stages +# +# You can copy and paste this template into a new `.gitlab-ci.yml` file. +# You should not add this template to an existing `.gitlab-ci.yml` file by using the `include:` keyword. +# +# To contribute improvements to CI/CD templates, please follow the Development guide at: +# https://docs.gitlab.com/ee/development/cicd/templates.html +# This specific template is located at: +# https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Getting-Started.gitlab-ci.yml + +stages: # List of stages for jobs, and their order of execution + - deploy + +deploy-job: # This job runs in the deploy stage. + stage: deploy # It runs only for tag pipelines (gated by the rules below), triggering the downstream autobtv pipeline.
+ script: + - 'curl --fail --request POST --form token=$MY_TRIGGER_TOKEN --form ref=master "https://gitlab.cern.ch/cms-analysis/btv/software-and-algorithms/autobtv/trigger/pipeline"' + rules: + - if: $CI_COMMIT_TAG + environment: production \ No newline at end of file diff --git a/scripts/fetch.py b/scripts/fetch.py index 44a1bfb2..bb217123 100644 --- a/scripts/fetch.py +++ b/scripts/fetch.py @@ -61,7 +61,7 @@ parser.add_argument( "--whitelist_sites", help="White list fot sites", - default="T2_DE_DESY,T2_DE_RWTH,T2_CH_CERN", + default=None, ) parser.add_argument( "--blacklist_sites", @@ -197,7 +197,7 @@ def getFilesFromDas(args): .read() .split("\n") ) - + print("Number of files: ", len(flist)) import json dataset = dataset[:-1] if "\n" in dataset else dataset diff --git a/scripts/suball.py b/scripts/suball.py index 7276a6cf..dfc32253 100644 --- a/scripts/suball.py +++ b/scripts/suball.py @@ -103,8 +103,6 @@ def get_lumi_from_web(year): # "QCD_mu_sf" ], } - if args.debug: - args.local = True if args.scheme in workflows.keys(): workflow_group["test"] = [args.scheme] args.scheme = "test" @@ -127,7 +125,7 @@ def get_lumi_from_web(year): for wf in workflow_group[args.scheme]: if args.debug: - print(f"Start running {wf} workflow!!!") + print(f"======{wf} in {args.scheme}=====") overwrite = "--overwrite" if args.overwrite else "" ## creating dataset if ( @@ -136,13 +134,25 @@ def get_lumi_from_web(year): ) or args.overwrite ): + if args.debug: + print( + f"Creating MC dataset: python scripts/fetch.py -c {args.campaign} --from_workflow {wf} --DAS_campaign {args.DAS_campaign} --year {args.year} {overwrite} --skipvalidation" + ) + os.system( f"python scripts/fetch.py -c {args.campaign} --from_workflow {wf} --DAS_campaign {args.DAS_campaign} --year {args.year} {overwrite} --skipvalidation" ) + if args.debug: + os.system(f"ls metadata/{args.campaign}/*.json") + ## Run the workflows for types in predefined_sample[wf].keys(): + if (types != "data" and types != "MC") and 
args.scheme == "Validation": continue + print( + f"hists_{wf}_{types}_{args.campaign}_{args.year}_{wf}/hists_{wf}_{types}_{args.campaign}_{args.year}_{wf}.coffea" + ) if ( not os.path.exists( f"hists_{wf}_{types}_{args.campaign}_{args.year}_{wf}/hists_{wf}_{types}_{args.campaign}_{args.year}_{wf}.coffea" @@ -181,12 +191,12 @@ def get_lumi_from_web(year): and "limit" not in key ): runner_config += " --limit 50" - elif args.debug: - runner_config += " --limit 1 --executor iterative" + else: runner_config += f" --{key}={value}" runner_config = runner_config_required + runner_config - print(runner_config) + if args.debug: + print(f"run the workflow: {runner_config}") with open( f"config_{args.year}_{args.campaign}_{args.scheme}_{args.version}.txt", "w", @@ -194,7 +204,8 @@ def get_lumi_from_web(year): config_list.write(runner_config) os.system(runner_config) - + if args.debug: + print(f"workflow is finished for {wf}!") # Get luminosity if ( os.path.exists( @@ -202,10 +213,14 @@ def get_lumi_from_web(year): ) or args.overwrite ): + if args.debug: + print( + f"Get the luminosity from hists_{wf}_data_{args.campaign}_{args.year}_{wf}/hists_{wf}_data_{args.campaign}_{args.year}_{wf}.coffea" + ) lumi = os.popen( f"python scripts/dump_processed.py -t all -c hists_{wf}_data_{args.campaign}_{args.year}_{wf}/hists_{wf}_data_{args.campaign}_{args.year}_{wf}.coffea --json metadata/{args.campaign}/data_{args.campaign}_{args.year}_{wf}.json -n {args.campaign}_{args.year}_{wf}" ).read() - + print(lumi) lumi = int( round( float( @@ -220,14 +235,20 @@ def get_lumi_from_web(year): ) if os.path.exists( f"hists_{wf}_MC_{args.campaign}_{args.year}_{wf}/hists_{wf}_MC_{args.campaign}_{args.year}_{wf}.coffea" + ) and os.path.exists( + f"hists_{wf}_data_{args.campaign}_{args.year}_{wf}/hists_{wf}_data_{args.campaign}_{args.year}_{wf}.coffea" ): print(lumi) + if args.debug: + print(f"Plot the dataMC for {wf}") os.system( f'python scripts/plotdataMC.py -i 
"hists_{wf}_*_{args.campaign}_{args.year}_{wf}/hists_{wf}_*_{args.campaign}_{args.year}_{wf}.coffea" --lumi {lumi} -p {wf} -v all --ext {args.campaign}_{args.year}{args.version}' ) ## Inspired from Uttiya, create remote directory # https://github.com/cms-btv-pog/BTVNanoCommissioning/blob/14e654feeb4b4d738ee43ab913efb343ea65fd1d/scripts/submit/createremotedir.sh # create remote direcotry + if args.debug: + print(f"Upload plots&coffea to eos: {wf}") if not args.local: os.system(f"mkdir -p {args.campaign}{args.version}/{wf}") os.system(f"cp scripts/index.php {args.campaign}{args.version}/.") diff --git a/src/BTVNanoCommissioning/helpers/update_branch.py b/src/BTVNanoCommissioning/helpers/update_branch.py index ba17d19a..46ef598e 100644 --- a/src/BTVNanoCommissioning/helpers/update_branch.py +++ b/src/BTVNanoCommissioning/helpers/update_branch.py @@ -86,7 +86,9 @@ def missing_branch(events): events.Jet, {"btagPNetCvNotB": jets.btagPNetCvNotB}, ) - if not hasattr(events.Jet, "btagRobustParTAK4CvNotB"): + if not hasattr(events.Jet, "btagRobustParTAK4CvNotB") and hasattr( + events.Jet, "btagRobustParTAK4B" + ): jets = events.Jet jets["btagRobustParTAK4CvNotB"] = ( jets.btagRobustParTAK4CvB