From d4705870283176e7d2fd90f69ae49128217ecb6e Mon Sep 17 00:00:00 2001
From: mvdbeek
Date: Mon, 23 Sep 2024 17:19:45 +0200
Subject: [PATCH] Fix data_column ref to nested collection

---
 lib/galaxy/tools/parameters/basic.py | 12 +++++++----
 lib/galaxy_test/api/test_tools.py    | 30 ++++++++++++++++++++++++++++
 lib/galaxy_test/base/populators.py   |  4 ++--
 3 files changed, 40 insertions(+), 6 deletions(-)

diff --git a/lib/galaxy/tools/parameters/basic.py b/lib/galaxy/tools/parameters/basic.py
index 279adde7a92e..9eef495cb153 100644
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1457,8 +1457,8 @@ def get_column_list(self, trans, other_values):
             # Use representative dataset if a dataset collection is parsed
             if isinstance(dataset, HistoryDatasetCollectionAssociation):
                 dataset = dataset.to_hda_representative()
-            if isinstance(dataset, DatasetCollectionElement) and dataset.hda:
-                dataset = dataset.hda
+            if isinstance(dataset, DatasetCollectionElement):
+                dataset = dataset.first_dataset_instance()
             if isinstance(dataset, HistoryDatasetAssociation) and self.ref_input and self.ref_input.formats:
                 direct_match, target_ext, converted_dataset = dataset.find_conversion_destination(
                     self.ref_input.formats
@@ -1553,9 +1553,13 @@ def is_file_empty(self, trans, other_values):
         for dataset in util.listify(other_values.get(self.data_ref)):
             # Use representative dataset if a dataset collection is parsed
             if isinstance(dataset, HistoryDatasetCollectionAssociation):
-                dataset = dataset.to_hda_representative()
+                if dataset.populated:
+                    dataset = dataset.to_hda_representative()
+                else:
+                    # That's fine, we'll check again on execution
+                    return True
             if isinstance(dataset, DatasetCollectionElement):
-                dataset = dataset.hda
+                dataset = dataset.first_dataset_instance()
             if isinstance(dataset, DatasetInstance):
                 return not dataset.has_data()
             if is_runtime_value(dataset):
diff --git a/lib/galaxy_test/api/test_tools.py b/lib/galaxy_test/api/test_tools.py
index 78a4b3e05c92..56fea13f8285 100644
--- a/lib/galaxy_test/api/test_tools.py
+++ b/lib/galaxy_test/api/test_tools.py
@@ -2481,6 +2481,36 @@ def test_implicit_reduce_with_mapping(self):
         )
         assert output_hdca["collection_type"] == "list"
 
+    @skip_without_tool("column_multi_param")
+    def test_multi_param_column_nested_list(self):
+        with self.dataset_populator.test_history() as history_id:
+            hdca = self.dataset_collection_populator.create_list_of_list_in_history(
+                history_id, ext="tabular", wait=True
+            ).json()
+            inputs = {
+                "input1": {"src": "hdca", "id": hdca["id"]},
+                # FIXME: integers don't work here
+                "col": "1",
+            }
+            response = self._run("column_multi_param", history_id, inputs, assert_ok=True)
+            self.dataset_populator.wait_for_job(job_id=response["jobs"][0]["id"], assert_ok=True)
+
+    @skip_without_tool("column_multi_param")
+    def test_multi_param_column_nested_list_fails_on_invalid_column(self):
+        with self.dataset_populator.test_history() as history_id:
+            hdca = self.dataset_collection_populator.create_list_of_list_in_history(
+                history_id, ext="tabular", wait=True
+            ).json()
+            inputs = {
+                "input1": {"src": "hdca", "id": hdca["id"]},
+                "col": "10",
+            }
+            try:
+                self._run("column_multi_param", history_id, inputs, assert_ok=True)
+            except AssertionError as e:
+                exception_raised = e
+            assert exception_raised, "Expected invalid column selection to fail job"
+
     @skip_without_tool("column_multi_param")
     def test_implicit_conversion_and_reduce(self):
         with self.dataset_populator.test_history() as history_id:
diff --git a/lib/galaxy_test/base/populators.py b/lib/galaxy_test/base/populators.py
index 4d1c46ca5309..014880ebd1e6 100644
--- a/lib/galaxy_test/base/populators.py
+++ b/lib/galaxy_test/base/populators.py
@@ -2898,7 +2898,7 @@ def __create_payload(self, history_id: str, *args, **kwds):
         else:
             return self.__create_payload_collection(history_id, *args, **kwds)
 
-    def __create_payload_fetch(self, history_id: str, collection_type, **kwds):
+    def __create_payload_fetch(self, history_id: str, collection_type, ext="txt", **kwds):
         contents = None
         if "contents" in kwds:
            contents = kwds["contents"]
@@ -2920,7 +2920,7 @@ def __create_payload_fetch(self, history_id: str, collection_type, **kwds):
                     elements.append(contents_level)
                     continue
 
-                element = {"src": "pasted", "ext": "txt"}
+                element = {"src": "pasted", "ext": ext}
                 # Else older style list of contents or element ID and contents,
                 # convert to fetch API.
                 if isinstance(contents_level, tuple):
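Note (not part of the patch): the point behind the basic.py change is that a DatasetCollectionElement wrapping a nested collection has no hda of its own, so the old `and dataset.hda` guard silently skipped it and no reference columns could be resolved; `first_dataset_instance()` instead descends into the nested collection until it reaches a concrete dataset. The sketch below illustrates that descent with hypothetical stand-in classes, not Galaxy's real models.

    # Illustrative sketch only -- hypothetical stand-ins, not Galaxy's models.
    from dataclasses import dataclass, field
    from typing import List, Optional


    @dataclass
    class FakeDataset:
        name: str


    @dataclass
    class FakeCollectionElement:
        # A flat element points directly at a dataset; a nested element
        # points at a child collection and has no dataset of its own.
        hda: Optional[FakeDataset] = None
        child_collection: Optional["FakeCollection"] = None

        def first_dataset_instance(self) -> Optional[FakeDataset]:
            # Conceptually what the patched code relies on: descend through
            # nested collections until a concrete dataset is found.
            if self.hda is not None:
                return self.hda
            if self.child_collection is not None:
                return self.child_collection.first_dataset_instance()
            return None


    @dataclass
    class FakeCollection:
        elements: List[FakeCollectionElement] = field(default_factory=list)

        def first_dataset_instance(self) -> Optional[FakeDataset]:
            for element in self.elements:
                found = element.first_dataset_instance()
                if found is not None:
                    return found
            return None


    # A list:list element wraps a child collection, so element.hda is None
    # and the old `dataset.hda` check bailed out; the descent still finds
    # a representative dataset to read columns from.
    nested = FakeCollectionElement(
        child_collection=FakeCollection(
            elements=[FakeCollectionElement(hda=FakeDataset("inner.tabular"))]
        )
    )
    assert nested.hda is None
    assert nested.first_dataset_instance().name == "inner.tabular"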