diff --git a/lib/galaxy/tool_util/parameters/__init__.py b/lib/galaxy/tool_util/parameters/__init__.py
index 423d4e950bf5..ac6ca0dbbf3c 100644
--- a/lib/galaxy/tool_util/parameters/__init__.py
+++ b/lib/galaxy/tool_util/parameters/__init__.py
@@ -62,6 +62,7 @@
     repeat_inputs_to_array,
     validate_explicit_conditional_test_value,
     visit_input_values,
+    VISITOR_NO_REPLACEMENT,
 )
 
 __all__ = (
@@ -116,6 +117,7 @@
     "keys_starting_with",
     "visit_input_values",
     "repeat_inputs_to_array",
+    "VISITOR_NO_REPLACEMENT",
     "decode",
     "encode",
     "WorkflowStepToolState",
diff --git a/lib/galaxy/tool_util/parameters/case.py b/lib/galaxy/tool_util/parameters/case.py
index 3c0459d033a3..b3bb725dd136 100644
--- a/lib/galaxy/tool_util/parameters/case.py
+++ b/lib/galaxy/tool_util/parameters/case.py
@@ -12,10 +12,12 @@
 from packaging.version import Version
 
 from galaxy.tool_util.parser.interface import (
+    TestCollectionDef,
     ToolSource,
     ToolSourceTest,
     ToolSourceTestInput,
     ToolSourceTestInputs,
+    xml_data_input_to_json,
 )
 from galaxy.util import asbool
 from .factory import input_models_for_tool_source
@@ -25,6 +27,7 @@
     ConditionalWhen,
     DataCollectionParameterModel,
     DataColumnParameterModel,
+    DataParameterModel,
     FloatParameterModel,
     IntegerParameterModel,
     RepeatParameterModel,
@@ -250,7 +253,22 @@
         test_input = _input_for(state_path, inputs)
         if test_input is not None:
             if isinstance(tool_input, (DataCollectionParameterModel,)):
-                input_value = test_input.get("attributes", {}).get("collection")
+                input_value = TestCollectionDef.from_dict(
+                    test_input.get("attributes", {}).get("collection")
+                ).test_format_to_dict()
+            elif isinstance(tool_input, (DataParameterModel,)):
+                data_tool_input = cast(DataParameterModel, tool_input)
+                if data_tool_input.multiple:
+                    test_input_values = test_input["value"].split(",")
+                    input_value_list = []
+                    for test_input_value in test_input_values:
+                        instance_test_input = test_input.copy()
+                        instance_test_input["value"] = test_input_value
+                        input_value = xml_data_input_to_json(instance_test_input)
+                        input_value_list.append(input_value)
+                    input_value = input_value_list
+                else:
+                    input_value = xml_data_input_to_json(test_input)
             else:
                 input_value = test_input["value"]
             input_value = legacy_from_string(tool_input, input_value, warnings, profile)
diff --git a/lib/galaxy/tool_util/parameters/convert.py b/lib/galaxy/tool_util/parameters/convert.py
index 14caed47e92c..617771374e1e 100644
--- a/lib/galaxy/tool_util/parameters/convert.py
+++ b/lib/galaxy/tool_util/parameters/convert.py
@@ -13,6 +13,7 @@
 from .state import (
     RequestInternalToolState,
     RequestToolState,
+    TestCaseToolState,
 )
 from .visitor import (
     visit_input_values,
@@ -71,3 +72,28 @@ def encode_callback(parameter: ToolParameterT, value: Any):
     request_state = RequestToolState(request_state_dict)
     request_state.validate(input_models)
     return request_state
+
+
+def encode_test(
+    test_case_state: TestCaseToolState,
+    input_models: ToolParameterBundle,
+    uploads,
+):
+    def encode_callback(parameter: ToolParameterT, value: Any):
+        if parameter.parameter_type == "gx_data":
+            assert isinstance(value, dict), str(value)
+            assert "id" in value
+            encoded_dict = value.copy()
+            encoded_dict["id"] = encode_id(value["id"])
+            return encoded_dict
+        else:
+            return VISITOR_NO_REPLACEMENT
+
+    request_state_dict = visit_input_values(
+        input_models,
+        test_case_state,
+        encode_callback,
+    )
+    request_state = RequestToolState(request_state_dict)
+    request_state.validate(input_models)
+    return request_state
diff --git a/lib/galaxy/tool_util/parameters/models.py b/lib/galaxy/tool_util/parameters/models.py
index 976195ef440f..30d6b5a5a483 100644
--- a/lib/galaxy/tool_util/parameters/models.py
+++ b/lib/galaxy/tool_util/parameters/models.py
@@ -41,7 +41,8 @@
 from galaxy.exceptions import RequestParameterInvalidException
 from galaxy.tool_util.parser.interface import (
     DrillDownOptionsDict,
-    TestCollectionDefDict,
+    JsonTestCollectionDefDict,
+    JsonTestDatasetDefDict,
 )
 from ._types import (
     cast_as_type,
@@ -312,9 +313,9 @@ def py_type_internal(self) -> Type:
     def py_type_test_case(self) -> Type:
         base_model: Type
         if self.multiple:
-            base_model = str
+            base_model = list_type(JsonTestDatasetDefDict)
         else:
-            base_model = str
+            base_model = JsonTestDatasetDefDict
         return optional_if_needed(base_model, self.optional)
 
     def pydantic_template(self, state_representation: StateRepresentationT) -> DynamicModelInformation:
@@ -372,7 +373,7 @@ def pydantic_template(self, state_representation: StateRepresentationT) -> DynamicModelInformation:
         elif state_representation == "workflow_step_linked":
             return dynamic_model_information_from_py_type(self, ConnectedValue)
         elif state_representation == "test_case_xml":
-            return dynamic_model_information_from_py_type(self, TestCollectionDefDict)
+            return dynamic_model_information_from_py_type(self, JsonTestCollectionDefDict)
         else:
             raise NotImplementedError(
                 f"Have not implemented data collection parameter models for state representation {state_representation}"
@@ -1179,9 +1180,7 @@ def to_simple_model(input_parameter: Union[ToolParameterModel, ToolParameterT])
     return cast(ToolParameterT, input_parameter)
 
 
-def simple_input_models(
-    parameters: Union[List[ToolParameterModel], List[ToolParameterT]]
-) -> Iterable[ToolParameterT]:
+def simple_input_models(parameters: Union[List[ToolParameterModel], List[ToolParameterT]]) -> Iterable[ToolParameterT]:
     return [to_simple_model(m) for m in parameters]
 
 
diff --git a/lib/galaxy/tool_util/parser/interface.py b/lib/galaxy/tool_util/parser/interface.py
index a7b3896c289f..955740efba9c 100644
--- a/lib/galaxy/tool_util/parser/interface.py
+++ b/lib/galaxy/tool_util/parser/interface.py
@@ -8,6 +8,7 @@
 from os.path import join
 from typing import (
     Any,
+    cast,
     Dict,
     List,
     Optional,
@@ -550,7 +551,8 @@ def parse_input_sources(self) -> List[InputSource]:
         """Return a list of InputSource objects."""
 
 
-TestCollectionDefElementObject = Union["TestCollectionDefDict", "ToolSourceTestInput"]
+AnyTestCollectionDefDict = Union["JsonTestCollectionDefDict", "XmlTestCollectionDefDict"]
+TestCollectionDefElementObject = Union[AnyTestCollectionDefDict, "ToolSourceTestInput"]
 TestCollectionAttributeDict = Dict[str, Any]
 CollectionType = str
 
@@ -560,7 +562,11 @@ class TestCollectionDefElementDict(TypedDict):
     element_definition: TestCollectionDefElementObject
 
 
-class TestCollectionDefDict(TypedDict):
+# Two versions of collection inputs can be parsed out. XmlTestCollectionDefDict is historically
+# used by tools and Galaxy internals and is exposed in the API via the test definition endpoints
+# for tool execution. JsonTestCollectionDefDict is the format consumed by Planemo and mirrors the
+# CWL way of defining inputs.
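+#
+# As a rough illustration (the identifiers and paths below are made up for this comment, not
+# taken from any particular tool), the same paired collection could be described either way:
+#
+#   XmlTestCollectionDefDict:
+#     {"model_class": "TestCollectionDef", "collection_type": "paired", "attributes": {...},
+#      "name": "pair",
+#      "elements": [{"element_identifier": "forward", "element_definition": {...}},
+#                   {"element_identifier": "reverse", "element_definition": {...}}]}
+#
+#   JsonTestCollectionDefDict:
+#     {"class": "Collection", "collection_type": "paired",
+#      "elements": [{"identifier": "forward", "class": "File", "path": "1_f.bed"},
+#                   {"identifier": "reverse", "class": "File", "path": "1_r.bed"}]}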
+class XmlTestCollectionDefDict(TypedDict):
     model_class: Literal["TestCollectionDef"]
     attributes: TestCollectionAttributeDict
     collection_type: CollectionType
@@ -568,6 +574,84 @@ class TestCollectionDefDict(TypedDict):
     elements: List[TestCollectionDefElementDict]
     name: str
 
 
+JsonTestDatasetDefDict = TypedDict(
+    "JsonTestDatasetDefDict",
+    {
+        "class": Literal["File"],
+        "path": NotRequired[Optional[str]],
+        "location": NotRequired[Optional[str]],
+        "name": NotRequired[Optional[str]],
+        "dbkey": NotRequired[Optional[str]],
+        "filetype": NotRequired[Optional[str]],
+        "composite_data": NotRequired[Optional[List[str]]],
+        "tags": NotRequired[Optional[List[str]]],
+    },
+)
+
+JsonTestCollectionDefElementDict = Union[
+    "JsonTestCollectionDefDatasetElementDict", "JsonTestCollectionDefCollectionElementDict"
+]
+JsonTestCollectionDefDatasetElementDict = TypedDict(
+    "JsonTestCollectionDefDatasetElementDict",
+    {
+        "identifier": str,
+        "class": Literal["File"],
+        "path": NotRequired[Optional[str]],
+        "location": NotRequired[Optional[str]],
+        "name": NotRequired[Optional[str]],
+        "dbkey": NotRequired[Optional[str]],
+        "filetype": NotRequired[Optional[str]],
+        "composite_data": NotRequired[Optional[List[str]]],
+        "tags": NotRequired[Optional[List[str]]],
+    },
+)
+JsonTestCollectionDefCollectionElementDict = TypedDict(
+    "JsonTestCollectionDefCollectionElementDict",
+    {
+        "identifier": str,
+        "class": Literal["Collection"],
+        "collection_type": str,
+        "elements": NotRequired[Optional[List[JsonTestCollectionDefElementDict]]],
+    },
+)
+JsonTestCollectionDefDict = TypedDict(
+    "JsonTestCollectionDefDict",
+    {
+        "class": Literal["Collection"],
+        "collection_type": str,
+        "elements": NotRequired[Optional[List[JsonTestCollectionDefElementDict]]],
+        "name": NotRequired[Optional[str]],
+    },
+)
+
+
+def xml_data_input_to_json(xml_input: ToolSourceTestInput) -> "JsonTestDatasetDefDict":
+    attributes = xml_input["attributes"]
+    as_dict: JsonTestDatasetDefDict = {}
+    as_dict["class"] = "File"
+    value = xml_input["value"]
+    if value:
+        as_dict["path"] = value
+    _copy_if_exists(attributes, as_dict, "location")
+    _copy_if_exists(attributes, as_dict, "dbkey")
+    _copy_if_exists(attributes, as_dict, "ftype", "filetype")
+    _copy_if_exists(attributes, as_dict, "composite_data", only_if_value=True)
+    tags = attributes.get("tags")
+    if tags:
+        as_dict["tags"] = [t.strip() for t in tags.split(",")]
+    return as_dict
+
+
+def _copy_if_exists(attributes, as_dict, name: str, as_name: Optional[str] = None, only_if_value: bool = False):
+    if name in attributes:
+        value = attributes[name]
+        if not value and only_if_value:
+            return
+        if as_name is None:
+            as_name = name
+        as_dict[as_name] = value
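+
+
+# As an illustration of the mapping above (the file name and attributes here are hypothetical),
+# an XML test input parsed as
+#   {"name": "input", "value": "1.bed", "attributes": {"dbkey": "hg19", "ftype": "bed"}}
+# comes out of xml_data_input_to_json as
+#   {"class": "File", "path": "1.bed", "dbkey": "hg19", "filetype": "bed"}.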
 
 
 class TestCollectionDef:
     __test__ = False  # Prevent pytest from discovering this class (issue #12071)
 
@@ -577,7 +661,33 @@ def __init__(self, attrib, name, collection_type, elements):
         self.elements = elements
         self.name = name
 
-    def to_dict(self) -> TestCollectionDefDict:
+    def _test_format_to_dict(self) -> "JsonTestCollectionDefDict":
+
+        def to_element(xml_element_dict: "TestCollectionDefElementDict") -> "JsonTestCollectionDefElementDict":
+            identifier = xml_element_dict["element_identifier"]
+            element_object = xml_element_dict["element_definition"]
+            if isinstance(element_object, TestCollectionDef):
+                as_dict = element_object._test_format_to_dict()
+                as_dict["identifier"] = identifier
+            else:
+                as_dict = xml_data_input_to_json(element_object)
+                as_dict["identifier"] = identifier
+            return as_dict
+
+        test_format_dict = {
+            "class": "Collection",
+            "elements": list(map(to_element, self.elements)),
+            "collection_type": self.collection_type,
+        }
+        return test_format_dict
+
+    def test_format_to_dict(self) -> JsonTestCollectionDefDict:
+        test_format_dict = self._test_format_to_dict()
+        if self.name:
+            test_format_dict["name"] = self.name
+        return test_format_dict
+
+    def to_dict(self) -> XmlTestCollectionDefDict:
         def element_to_dict(element_dict):
             element_identifier, element_def = element_dict["element_identifier"], element_dict["element_definition"]
             if isinstance(element_def, TestCollectionDef):
@@ -596,23 +706,47 @@ def element_to_dict(element_dict):
         }
 
     @staticmethod
-    def from_dict(as_dict: TestCollectionDefDict):
-        assert as_dict["model_class"] == "TestCollectionDef"
-
-        def element_from_dict(element_dict):
-            if "element_definition" not in element_dict:
-                raise Exception(f"Invalid element_dict {element_dict}")
-            element_def = element_dict["element_definition"]
-            if element_def.get("model_class", None) == "TestCollectionDef":
-                element_def = TestCollectionDef.from_dict(element_def)
-            return {"element_identifier": element_dict["element_identifier"], "element_definition": element_def}
-
-        return TestCollectionDef(
-            attrib=as_dict["attributes"],
-            name=as_dict["name"],
-            elements=list(map(element_from_dict, as_dict["elements"] or [])),
-            collection_type=as_dict["collection_type"],
-        )
+    def from_dict(as_dict: AnyTestCollectionDefDict) -> "TestCollectionDef":
+        if "model_class" in as_dict:
+            xml_as_dict = cast(XmlTestCollectionDefDict, as_dict)
+            assert xml_as_dict["model_class"] == "TestCollectionDef"
+
+            def element_from_dict(element_dict):
+                if "element_definition" not in element_dict:
+                    raise Exception(f"Invalid element_dict {element_dict}")
+                element_def = element_dict["element_definition"]
+                if element_def.get("model_class", None) == "TestCollectionDef":
+                    element_def = TestCollectionDef.from_dict(element_def)
+                return {"element_identifier": element_dict["element_identifier"], "element_definition": element_def}
+
+            return TestCollectionDef(
+                attrib=xml_as_dict["attributes"],
+                name=xml_as_dict["name"],
+                elements=list(map(element_from_dict, xml_as_dict["elements"] or [])),
+                collection_type=xml_as_dict["collection_type"],
+            )
+        else:
+            json_as_dict = cast(JsonTestCollectionDefDict, as_dict)
+
+            def element_from_dict_json(
+                element_dict: JsonTestCollectionDefElementDict,
+            ) -> TestCollectionDefElementObject:
+                element_class = element_dict.get("class")
+                identifier = element_dict["identifier"]
+                if element_class == "Collection":
+                    element_def = TestCollectionDef.from_dict(element_dict)
+                else:
+                    value = element_dict["path"]  # TODO: handle location
+                    element_def = {"name": identifier, "value": value, "attributes": {}}
+                return {"element_identifier": identifier, "element_definition": element_def}
+
+            elements = list(map(element_from_dict_json, json_as_dict.get("elements", [])))
+            return TestCollectionDef(
+                attrib={},
+                name=json_as_dict.get("name") or "Unnamed Collection",
+                elements=elements,
+                collection_type=json_as_dict["collection_type"],
+            )
 
     def collect_inputs(self):
         inputs = []
diff --git a/lib/galaxy/tool_util/parser/xml.py b/lib/galaxy/tool_util/parser/xml.py
index 9ab4a30f65b6..8ab0bfe485c9 100644
--- a/lib/galaxy/tool_util/parser/xml.py
+++ b/lib/galaxy/tool_util/parser/xml.py
@@ -45,7 +45,6 @@
     PageSource,
     PagesSource,
     RequiredFiles,
-    TestCollectionDefDict,
     TestCollectionDefElementDict,
     TestCollectionDefElementObject,
     TestCollectionOutputDef,
@@ -58,6 +57,7 @@
     ToolSourceTestOutputAttributes,
     ToolSourceTestOutputs,
     ToolSourceTests,
+    XmlTestCollectionDefDict,
     XrefDict,
 )
 from .output_actions import ToolOutputActionGroup
@@ -1001,7 +1001,7 @@ def __parse_inputs_elems(test_elem, i) -> ToolSourceTestInputs:
     return raw_inputs
 
 
-def _test_collection_def_dict(elem: Element) -> TestCollectionDefDict:
+def _test_collection_def_dict(elem: Element) -> XmlTestCollectionDefDict:
     elements: List[TestCollectionDefElementDict] = []
     attrib: Dict[str, Any] = _element_to_dict(elem)
     collection_type = attrib["type"]
@@ -1017,7 +1017,7 @@ def _test_collection_def_dict(elem: Element) -> TestCollectionDefDict:
             element_definition = __parse_param_elem(element)
         elements.append({"element_identifier": element_identifier, "element_definition": element_definition})
 
-    return TestCollectionDefDict(
+    return XmlTestCollectionDefDict(
         model_class="TestCollectionDef",
         attributes=attrib,
         collection_type=collection_type,
diff --git a/lib/galaxy/tool_util/verify/interactor.py b/lib/galaxy/tool_util/verify/interactor.py
index 781fc3bd30fb..2f83b9704a1a 100644
--- a/lib/galaxy/tool_util/verify/interactor.py
+++ b/lib/galaxy/tool_util/verify/interactor.py
@@ -38,10 +38,10 @@
 from galaxy.tool_util.parser.interface import (
     AssertionList,
     TestCollectionDef,
-    TestCollectionDefDict,
     TestCollectionOutputDef,
     TestSourceTestOutputColllection,
     ToolSourceTestOutputs,
+    XmlTestCollectionDefDict,
 )
 from galaxy.util import requests
 from galaxy.util.bunch import Bunch
@@ -1754,7 +1754,7 @@ def expanded_inputs_from_json(expanded_inputs_json: ExpandedToolInputsJsonified)
     loaded_inputs: ExpandedToolInputs = {}
     for key, value in expanded_inputs_json.items():
         if isinstance(value, dict) and value.get("model_class"):
-            collection_def_dict = cast(TestCollectionDefDict, value)
+            collection_def_dict = cast(XmlTestCollectionDefDict, value)
             loaded_inputs[key] = TestCollectionDef.from_dict(collection_def_dict)
         else:
             loaded_inputs[key] = value
diff --git a/test/unit/tool_util/parameter_specification.yml b/test/unit/tool_util/parameter_specification.yml
index da3671057075..8497843a427c 100644
--- a/test/unit/tool_util/parameter_specification.yml
+++ b/test/unit/tool_util/parameter_specification.yml
@@ -533,6 +533,13 @@ gx_data:
     # expanded out.
     - parameter: {__class__: "Batch", values: [{src: hdca, id: 5}]}
     - parameter: {src: hda, id: abcdabcd}
+  test_case_xml_valid:
+    - parameter: {class: File, path: foo.bed}
+    - parameter: {class: File, location: "https://raw.githubusercontent.com/galaxyproject/planemo/7be1bf5b3971a43eaa73f483125bfb8cabf1c440/tests/data/hello.txt"}
+  test_case_xml_invalid:
+    - parameter: foo.bed
+    - parameter: null
+    - {}
   workflow_step_valid:
     - {}
   workflow_step_invalid:
@@ -592,6 +599,11 @@ gx_data_optional:
     - {src: hda, id: 7}
     - parameter: {__class__: "Batch", values: [{src: hdca, id: 5}]}
     - parameter: {__class__: 'ConnectedValueX'}
+  test_case_xml_valid:
+    - {}
+    - parameter: {class: "File", path: "1.bed"}
+  test_case_xml_invalid:
+    - parameter: {class: "NotAFile", path: "1.bed"}
 
 gx_data_multiple:
   request_valid:
@@ -690,6 +702,49 @@ gx_data_collection:
     - parameter: {__class__: 'ConnectedValue'}
   workflow_step_linked_invalid:
     - {}
+  test_case_xml_valid:
+    - parameter: {class: Collection, collection_type: list, elements: []}
+    - parameter:
+        class: Collection
+        collection_type: list
+        elements:
+          - {identifier: "first", path: "1.bed", class: File}
+          - {identifier: "second", path: "2.bed", class: File}
+    - parameter:
+        name: "A nested list with a name"
+        class: Collection
+        collection_type: "list:paired"
+        elements:
+          - class: Collection
+            collection_type: paired
+            identifier: first_el
+            elements:
+              - {identifier: "forward", path: "1_f.bed", class: File}
+              - {identifier: "reverse", path: "1_r.bed", class: File}
+  test_case_xml_invalid:
+    # collection type is required
+    - parameter: {class: Collection, elements: []}
+    - parameter:
+        class: Collection
+        collection_type: list
+        elements:
+          - {identifier: "first", path: "1.bed", class: NotAFile}
+    - parameter:
+        class: Collection
+        collection_type: list
+        elements:
+          - {identifier: "first", pathmisspelled: "1.bed", class: File}
+    - parameter:
+        name: "A nested list with a name"
+        class: Collection
+        collection_type: "list:paired"
+        elements:
+          - class: Collection
+            collection_type: paired
+            identifier: first_el
+            elements:
+              - {identifier: "forward", path: "1_f.bed", class: File}
+              - {identifier: "reverse", path: "1_r.bed", class: FileX}
 
 gx_data_collection_optional:
   request_valid:
@@ -995,11 +1050,11 @@ gx_data_column:
   request_internal_invalid:
     - { ref_parameter: {src: hda, id: 123}, parameter: "0" }
   test_case_xml_valid:
-    - { ref_parameter: "1.bed", parameter: 3 }
+    - { ref_parameter: {class: "File", path: "1.bed"}, parameter: 3 }
   test_case_xml_invalid:
-    - { ref_parameter: "1.bed", parameter: "3" }
+    - { ref_parameter: {class: "File", path: "1.bed"}, parameter: "3" }
   test_case_xml_invalid:
-    - { ref_parameter: "1.bed", parameter: "c2: With name" }
+    - { ref_parameter: {class: "File", path: "1.bed"}, parameter: "c2: With name" }
 
 gx_data_column_optional:
   request_valid:
diff --git a/test/unit/tool_util/test_parameter_test_cases.py b/test/unit/tool_util/test_parameter_test_cases.py
index aa5db0319259..c0899aee2163 100644
--- a/test/unit/tool_util/test_parameter_test_cases.py
+++ b/test/unit/tool_util/test_parameter_test_cases.py
@@ -8,13 +8,18 @@
 from galaxy.tool_util.models import parse_tool
 from galaxy.tool_util.parameters.case import (
     test_case_state as case_state,
+    TestCaseStateAndWarnings,
     TestCaseStateValidationResult,
     validate_test_cases_for_tool_source,
 )
 from galaxy.tool_util.parser.factory import get_tool_source
-from galaxy.tool_util.parser.interface import ToolSourceTest
+from galaxy.tool_util.parser.interface import (
+    ToolSource,
+    ToolSourceTest,
+)
 from galaxy.tool_util.unittest_utils import functional_test_tool_directory
 from galaxy.tool_util.verify.parse import parse_tool_test_descriptions
+from .util import dict_verify_each
 
 # legacy tools allows specifying parameter and repeat parameters without
 # qualification. This was problematic and could result in ambigious specifications.
@@ -116,6 +121,73 @@ def test_validate_framework_test_tools():
             raise Exception(f"Failed to validate {tool_path}: {str(e)}")
 
 
+def test_test_case_state_conversion():
+    tool_source = tool_source_for("collection_nested_test")
+    test_cases: List[ToolSourceTest] = tool_source.parse_tests_to_dict()["tests"]
+    state = case_state_for(tool_source, test_cases[0])
+    expectations = [
+        (["f1", "collection_type"], "list:paired"),
+        (["f1", "class"], "Collection"),
+        (["f1", "elements", 0, "class"], "Collection"),
+        (["f1", "elements", 0, "collection_type"], "paired"),
+        (["f1", "elements", 0, "elements", 0, "class"], "File"),
+        (["f1", "elements", 0, "elements", 0, "path"], "simple_line.txt"),
+        (["f1", "elements", 0, "elements", 0, "identifier"], "forward"),
+    ]
+    dict_verify_each(state.tool_state.input_state, expectations)
+
+    tool_source = tool_source_for("dbkey_filter_input")
+    test_cases: List[ToolSourceTest] = tool_source.parse_tests_to_dict()["tests"]
+    state = case_state_for(tool_source, test_cases[0])
+    expectations = [
+        (["inputs", "class"], "File"),
+        (["inputs", "dbkey"], "hg19"),
+    ]
+    dict_verify_each(state.tool_state.input_state, expectations)
+
+    tool_source = tool_source_for("discover_metadata_files")
+    test_cases: List[ToolSourceTest] = tool_source.parse_tests_to_dict()["tests"]
+    state = case_state_for(tool_source, test_cases[0])
+    expectations = [
+        (["input_bam", "class"], "File"),
+        (["input_bam", "filetype"], "bam"),
+    ]
+    dict_verify_each(state.tool_state.input_state, expectations)
+
+    tool_source = tool_source_for("remote_test_data_location")
+    test_cases: List[ToolSourceTest] = tool_source.parse_tests_to_dict()["tests"]
+    state = case_state_for(tool_source, test_cases[0])
+    expectations = [
+        (["input", "class"], "File"),
+        (
+            ["input", "location"],
+            "https://raw.githubusercontent.com/galaxyproject/planemo/7be1bf5b3971a43eaa73f483125bfb8cabf1c440/tests/data/hello.txt",
+        ),
+    ]
+    dict_verify_each(state.tool_state.input_state, expectations)
+
+    tool_source = tool_source_for("composite")
+    test_cases: List[ToolSourceTest] = tool_source.parse_tests_to_dict()["tests"]
+    state = case_state_for(tool_source, test_cases[0])
+    expectations = [
+        (["input", "class"], "File"),
+        (["input", "filetype"], "velvet"),
+        (["input", "composite_data", 0], "velveth_test1/Sequences"),
+    ]
+    dict_verify_each(state.tool_state.input_state, expectations)
+
+    tool_source = tool_source_for("parameters/gx_group_tag")
+    test_cases: List[ToolSourceTest] = tool_source.parse_tests_to_dict()["tests"]
+    state = case_state_for(tool_source, test_cases[0])
+    expectations = [
+        (["ref_parameter", "class"], "Collection"),
+        (["ref_parameter", "collection_type"], "paired"),
+        (["ref_parameter", "elements", 0, "identifier"], "forward"),
+        (["ref_parameter", "elements", 0, "tags", 0], "group:type:single"),
+    ]
+    dict_verify_each(state.tool_state.input_state, expectations)
+
+
 def _validate_path(tool_path: str):
     tool_source = get_tool_source(tool_path)
     parsed_tool = parse_tool(tool_source)
@@ -130,7 +202,17 @@ def _validate_path(tool_path: str):
 
 
 def validate_test_cases_for(tool_name: str, **kwd) -> List[TestCaseStateValidationResult]:
+    return validate_test_cases_for_tool_source(tool_source_for(tool_name), **kwd)
+
+
+def case_state_for(tool_source: ToolSource, test_case: ToolSourceTest) -> TestCaseStateAndWarnings:
+    parsed_tool = parse_tool(tool_source)
+    profile = tool_source.parse_profile()
+    return case_state(test_case, parsed_tool.inputs, profile)
+
+
+def tool_source_for(tool_name: str) -> ToolSource:
     test_tool_directory = functional_test_tool_directory()
     tool_path = os.path.join(test_tool_directory, f"{tool_name}.xml")
     tool_source = get_tool_source(tool_path)
-    return validate_test_cases_for_tool_source(tool_source, **kwd)
+    return tool_source
diff --git a/test/unit/tool_util/test_test_definition_parsing.py b/test/unit/tool_util/test_test_definition_parsing.py
index f7c2c3bd28b9..6576660c3b00 100644
--- a/test/unit/tool_util/test_test_definition_parsing.py
+++ b/test/unit/tool_util/test_test_definition_parsing.py
@@ -1,6 +1,5 @@
 """Tool test parsing to dicts logic."""
 
-import json
 import os
 from typing import (
     Any,
@@ -17,6 +16,7 @@
     in_packages,
 )
 from galaxy.util.unittest import TestCase
+from .util import dict_verify_each
 
 # Not the whole response, just some keys and such to test...
 SIMPLE_CONSTRUCTS_EXPECTATIONS_0 = [
@@ -117,18 +117,6 @@ def test_bigwigtowig_converter(self):
         self._verify_each(test_dicts[1].to_dict(), BIGWIG_TO_WIG_EXPECTATIONS)
 
     def _verify_each(self, target_dict: dict, expectations: List[Any]):
-        assert_json_encodable(target_dict)
-        for path, expectation in expectations:
-            exception = target_dict.get("exception")
-            assert not exception, f"Test failed to generate with exception {exception}"
-            self._verify(target_dict, path, expectation)
-
-    def _verify(self, target_dict: dict, expectation_path: List[str], expectation: Any):
-        rest = target_dict
-        for path_part in expectation_path:
-            rest = rest[path_part]
-        assert rest == expectation, f"{rest} != {expectation} for {expectation_path}"
-
-
-def assert_json_encodable(as_dict: dict):
-    json.dumps(as_dict)
+        exception = target_dict.get("exception")
+        assert not exception, f"Test failed to generate with exception {exception}"
+        dict_verify_each(target_dict, expectations)
diff --git a/test/unit/tool_util/util.py b/test/unit/tool_util/util.py
index c94345b29f54..b03aff0fc15f 100644
--- a/test/unit/tool_util/util.py
+++ b/test/unit/tool_util/util.py
@@ -1,11 +1,35 @@
+import json
 from contextlib import contextmanager
 from os import environ
+from typing import (
+    Any,
+    List,
+)
 
 import pytest
 
 external_dependency_management = pytest.mark.external_dependency_management
 
 
+def dict_verify_each(target_dict: dict, expectations: List[Any]):
+    assert_json_encodable(target_dict)
+    for path, expectation in expectations:
+        exception = target_dict.get("exception")
+        assert not exception, f"Test failed to generate with exception {exception}"
+        dict_verify(target_dict, path, expectation)
+
+
+def dict_verify(target_dict: dict, expectation_path: List[str], expectation: Any):
+    rest = target_dict
+    for path_part in expectation_path:
+        rest = rest[path_part]
+    assert rest == expectation, f"{rest} != {expectation} for {expectation_path}"
+
+
+def assert_json_encodable(as_dict: dict):
+    json.dumps(as_dict)
+
+
 @contextmanager
 def modify_environ(values, keys_to_remove=None):
     """