From 15a7a9fbb496cd0675657dcf6a9e885ad6cf153d Mon Sep 17 00:00:00 2001
From: Donald Campbell <125581724+donaldcampbelljr@users.noreply.github.com>
Date: Wed, 13 Mar 2024 14:59:56 -0400
Subject: [PATCH] fix for #467

---
 looper/conductor.py            |  7 +++--
 tests/smoketests/test_other.py | 56 ++++++++++++++++++++++++++++++----
 2 files changed, 55 insertions(+), 8 deletions(-)

diff --git a/looper/conductor.py b/looper/conductor.py
index 52e92117..dd59663b 100644
--- a/looper/conductor.py
+++ b/looper/conductor.py
@@ -306,6 +306,9 @@ def add_sample(self, sample, rerun=False):
 
         use_this_sample = True  # default to running this sample
         msg = None
+        if rerun and sample_statuses == []:
+            msg = f"> Skipping sample because rerun requested, but no failed or waiting flag found."
+            use_this_sample = False
         if sample_statuses:
             status_str = ", ".join(sample_statuses)
             failed_flag = any("failed" in x for x in sample_statuses)
@@ -314,13 +317,13 @@
                 msg = f"> Found existing status: {status_str}. Ignoring."
             else:  # this pipeline already has a status
                 msg = f"> Found existing status: {status_str}. Skipping sample."
-                if failed_flag:
+                if failed_flag and not rerun:
                     msg += " Use rerun to ignore failed status."  # help guidance
                 use_this_sample = False
             if rerun:
                 # Rescue the sample if rerun requested, and failed flag is found
                 if failed_flag or waiting_flag:
-                    msg = f"> Re-running failed sample. Status: {status_str}"
+                    msg = f"> Re-running sample. Status: {status_str}"
                     use_this_sample = True
                 else:
                     msg = f"> Skipping sample because rerun requested, but no failed or waiting flag found. Status: {status_str}"
diff --git a/tests/smoketests/test_other.py b/tests/smoketests/test_other.py
index 9b142980..1ad41bbb 100644
--- a/tests/smoketests/test_other.py
+++ b/tests/smoketests/test_other.py
@@ -10,7 +10,8 @@
 import pandas as pd
 
 
-def _make_flags(cfg, type, pipeline_name):
+def _make_flags_pipestat(cfg, type, pipeline_name):
+    """This makes flags for projects where pipestat is configured and used"""
 
     # get flag dir from .looper.yaml
     with open(cfg, "r") as f:
@@ -33,6 +34,31 @@
         f.write(type)
 
 
+def _make_flags(cfg, type, pipeline_name):
+    """This makes flags for projects where pipestat is NOT configured"""
+
+    # get flag dir from .looper.yaml
+    with open(cfg, "r") as f:
+        looper_cfg_data = safe_load(f)
+        output_dir = looper_cfg_data[OUTDIR_KEY]
+
+    output_dir = os.path.join(os.path.dirname(cfg), output_dir)
+    # get samples from the project config via Peppy
+    project_config_path = get_project_config_path(cfg)
+    p = Project(project_config_path)
+
+    for s in p.samples:
+        # Make flags in sample subfolder, e.g /tmp/tmphqxdmxnl/advanced/results/results_pipeline/sample1
+        sf = os.path.join(output_dir, "results_pipeline", s.sample_name)
+        if not os.path.exists(sf):
+            os.makedirs(sf)
+        flag_path = os.path.join(
+            sf, pipeline_name + "_" + s.sample_name + "_" + type + ".flag"
+        )
+        with open(flag_path, "w") as f:
+            f.write(type)
+
+
 class TestLooperPipestat:
 
     @pytest.mark.parametrize("cmd", ["report", "table", "check"])
@@ -69,7 +95,7 @@ class TestLooperRerun:
     def test_pipestat_rerun(self, prep_temp_pep_pipestat, pipeline_name, flags):
         """Verify that rerun works with either failed or waiting flags"""
         tp = prep_temp_pep_pipestat
-        _make_flags(tp, flags, pipeline_name)
+        _make_flags_pipestat(tp, flags, pipeline_name)
 
         x = ["rerun", "--looper-config", tp]
         try:
@@ -79,6 +105,24 @@ def test_pipestat_rerun(self, prep_temp_pep_pipestat, pipeline_name, flags):
             result = main(test_args=x)
         except Exception:
             raise pytest.fail("DID RAISE {0}".format(Exception))
         assert result["Jobs submitted"] == 2
+    @pytest.mark.parametrize(
+        "flags", [FLAGS[2], FLAGS[3]]
+    )  # Waiting and Failed flags should work
+    @pytest.mark.parametrize("pipeline_name", ["PIPELINE1"])
+    def test_rerun_no_pipestat(self, prep_temp_pep, pipeline_name, flags):
+        """Verify that rerun works with either failed or waiting flags"""
+        tp = prep_temp_pep
+        _make_flags(tp, flags, pipeline_name)
+
+        x = ["rerun", "--looper-config", tp]
+        try:
+            result = main(test_args=x)
+        except Exception:
+            raise pytest.fail("DID RAISE {0}".format(Exception))
+
+        # Only 3 failed flags exist for PIPELINE1, so only 3 samples should be submitted
+        assert result["Jobs submitted"] == 3
+
 class TestLooperCheck:
     @pytest.mark.parametrize("flag_id", FLAGS)
@@ -88,7 +132,7 @@ class TestLooperCheck:
     def test_check_works(self, prep_temp_pep_pipestat, flag_id, pipeline_name):
         """Verify that checking works"""
         tp = prep_temp_pep_pipestat
-        _make_flags(tp, flag_id, pipeline_name)
+        _make_flags_pipestat(tp, flag_id, pipeline_name)
 
         x = ["check", "--looper-config", tp]
 
@@ -106,8 +150,8 @@ def test_check_works(self, prep_temp_pep_pipestat, flag_id, pipeline_name):
     def test_check_multi(self, prep_temp_pep_pipestat, flag_id, pipeline_name):
         """Verify that checking works when multiple flags are created"""
         tp = prep_temp_pep_pipestat
-        _make_flags(tp, flag_id, pipeline_name)
-        _make_flags(tp, FLAGS[1], pipeline_name)
+        _make_flags_pipestat(tp, flag_id, pipeline_name)
+        _make_flags_pipestat(tp, FLAGS[1], pipeline_name)
 
         x = ["check", "--looper-config", tp]
         # Multiple flag files SHOULD cause pipestat to throw an assertion error
@@ -120,7 +164,7 @@
     def test_check_bogus(self, prep_temp_pep_pipestat, flag_id, pipeline_name):
         """Verify that checking works when bogus flags are created"""
         tp = prep_temp_pep_pipestat
-        _make_flags(tp, flag_id, pipeline_name)
+        _make_flags_pipestat(tp, flag_id, pipeline_name)
 
         x = ["check", "--looper-config", tp]
         try: