diff --git a/lib/galaxy/tool_util/models.py b/lib/galaxy/tool_util/models.py
index 05fab48515b3..deafef1a5243 100644
--- a/lib/galaxy/tool_util/models.py
+++ b/lib/galaxy/tool_util/models.py
@@ -48,7 +48,7 @@ def parse_tool(tool_source: ToolSource) -> ParsedTool:
version = tool_source.parse_version()
name = tool_source.parse_name()
description = tool_source.parse_description()
- inputs = input_models_for_tool_source(tool_source).input_models
+ inputs = input_models_for_tool_source(tool_source).parameters
outputs = from_tool_source(tool_source)
citations = tool_source.parse_citations()
license = tool_source.parse_license()
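A minimal sketch (hypothetical tool path, not part of this diff) of the parse_tool entry point touched above; ParsedTool.inputs is now populated from the bundle's renamed parameters attribute, and the name/parameter_type attributes are assumed from the parameter models in this change.

    from galaxy.tool_util.models import parse_tool
    from galaxy.tool_util.parser.factory import get_tool_source

    tool_source = get_tool_source("test/functional/tools/parameters/gx_int.xml")  # hypothetical path
    parsed_tool = parse_tool(tool_source)
    for parameter in parsed_tool.inputs:  # backed by the renamed .parameters bundle attribute
        print(parameter.name, parameter.parameter_type)
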
diff --git a/lib/galaxy/tool_util/parameters/__init__.py b/lib/galaxy/tool_util/parameters/__init__.py
index 423d4e950bf5..ac6ca0dbbf3c 100644
--- a/lib/galaxy/tool_util/parameters/__init__.py
+++ b/lib/galaxy/tool_util/parameters/__init__.py
@@ -62,6 +62,7 @@
repeat_inputs_to_array,
validate_explicit_conditional_test_value,
visit_input_values,
+ VISITOR_NO_REPLACEMENT,
)
__all__ = (
@@ -116,6 +117,7 @@
"keys_starting_with",
"visit_input_values",
"repeat_inputs_to_array",
+ "VISITOR_NO_REPLACEMENT",
"decode",
"encode",
"WorkflowStepToolState",
diff --git a/lib/galaxy/tool_util/parameters/case.py b/lib/galaxy/tool_util/parameters/case.py
index 15c029fc68ff..cd8aa4f71e04 100644
--- a/lib/galaxy/tool_util/parameters/case.py
+++ b/lib/galaxy/tool_util/parameters/case.py
@@ -12,10 +12,13 @@
from packaging.version import Version
from galaxy.tool_util.parser.interface import (
+ TestCollectionDef,
ToolSource,
ToolSourceTest,
ToolSourceTestInput,
ToolSourceTestInputs,
+ xml_data_input_to_json,
+ XmlTestCollectionDefDict,
)
from galaxy.util import asbool
from .factory import input_models_for_tool_source
@@ -25,6 +28,7 @@
ConditionalWhen,
DataCollectionParameterModel,
DataColumnParameterModel,
+ DataParameterModel,
FloatParameterModel,
IntegerParameterModel,
RepeatParameterModel,
@@ -249,8 +253,26 @@ def _merge_into_state(
else:
test_input = _input_for(state_path, inputs)
if test_input is not None:
+ input_value: Any
if isinstance(tool_input, (DataCollectionParameterModel,)):
- input_value = test_input.get("attributes", {}).get("collection")
+ input_value = TestCollectionDef.from_dict(
+ cast(XmlTestCollectionDefDict, test_input.get("attributes", {}).get("collection"))
+ ).test_format_to_dict()
+ elif isinstance(tool_input, (DataParameterModel,)):
+ data_tool_input = cast(DataParameterModel, tool_input)
+ if data_tool_input.multiple:
+ value = test_input["value"]
+ input_value_list = []
+ if value:
+ test_input_values = cast(str, value).split(",")
+ for test_input_value in test_input_values:
+ instance_test_input = test_input.copy()
+ instance_test_input["value"] = test_input_value
+                        input_value = xml_data_input_to_json(instance_test_input)
+ input_value_list.append(input_value)
+ input_value = input_value_list
+ else:
+ input_value = xml_data_input_to_json(test_input)
else:
input_value = test_input["value"]
input_value = legacy_from_string(tool_input, input_value, warnings, profile)
@@ -299,6 +321,6 @@ def validate_test_cases_for_tool_source(
test_cases: List[ToolSourceTest] = tool_source.parse_tests_to_dict()["tests"]
results_by_test: List[TestCaseStateValidationResult] = []
for test_case in test_cases:
- validation_result = test_case_validation(test_case, tool_parameter_bundle.input_models, profile)
+ validation_result = test_case_validation(test_case, tool_parameter_bundle.parameters, profile)
results_by_test.append(validation_result)
return results_by_test
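A short sketch of the conversion performed by the DataParameterModel branch added above: a comma-separated multi-dataset test value is split and each instance is converted with xml_data_input_to_json. The input name, values, and ftype below are hypothetical.

    from galaxy.tool_util.parser.interface import ToolSourceTestInput, xml_data_input_to_json

    test_input: ToolSourceTestInput = {
        "name": "input1",
        "value": "1.bed,2.bed",
        "attributes": {"ftype": "bed"},
    }
    input_value_list = []
    for single_value in test_input["value"].split(","):
        instance_test_input = test_input.copy()
        instance_test_input["value"] = single_value
        input_value_list.append(xml_data_input_to_json(instance_test_input))
    # input_value_list == [
    #     {"class": "File", "path": "1.bed", "filetype": "bed"},
    #     {"class": "File", "path": "2.bed", "filetype": "bed"},
    # ]
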
diff --git a/lib/galaxy/tool_util/parameters/factory.py b/lib/galaxy/tool_util/parameters/factory.py
index de7d567458c5..a636b13a17e6 100644
--- a/lib/galaxy/tool_util/parameters/factory.py
+++ b/lib/galaxy/tool_util/parameters/factory.py
@@ -319,7 +319,7 @@ def _from_input_source_cwl(input_source: CwlInputSource) -> ToolParameterT:
def input_models_from_json(json: List[Dict[str, Any]]) -> ToolParameterBundle:
- return ToolParameterBundleModel(input_models=json)
+ return ToolParameterBundleModel(parameters=json)
def tool_parameter_bundle_from_json(json: Dict[str, Any]) -> ToolParameterBundleModel:
@@ -328,7 +328,7 @@ def tool_parameter_bundle_from_json(json: Dict[str, Any]) -> ToolParameterBundle
def input_models_for_tool_source(tool_source: ToolSource) -> ToolParameterBundleModel:
pages = tool_source.parse_input_pages()
- return ToolParameterBundleModel(input_models=input_models_for_pages(pages))
+ return ToolParameterBundleModel(parameters=input_models_for_pages(pages))
def input_models_for_pages(pages: PagesSource) -> List[ToolParameterT]:
diff --git a/lib/galaxy/tool_util/parameters/models.py b/lib/galaxy/tool_util/parameters/models.py
index f83b0bd0d9e0..30d6b5a5a483 100644
--- a/lib/galaxy/tool_util/parameters/models.py
+++ b/lib/galaxy/tool_util/parameters/models.py
@@ -41,7 +41,8 @@
from galaxy.exceptions import RequestParameterInvalidException
from galaxy.tool_util.parser.interface import (
DrillDownOptionsDict,
- TestCollectionDefDict,
+ JsonTestCollectionDefDict,
+ JsonTestDatasetDefDict,
)
from ._types import (
cast_as_type,
@@ -312,9 +313,9 @@ def py_type_internal(self) -> Type:
def py_type_test_case(self) -> Type:
base_model: Type
if self.multiple:
- base_model = str
+ base_model = list_type(JsonTestDatasetDefDict)
else:
- base_model = str
+ base_model = JsonTestDatasetDefDict
return optional_if_needed(base_model, self.optional)
def pydantic_template(self, state_representation: StateRepresentationT) -> DynamicModelInformation:
@@ -372,7 +373,7 @@ def pydantic_template(self, state_representation: StateRepresentationT) -> Dynam
elif state_representation == "workflow_step_linked":
return dynamic_model_information_from_py_type(self, ConnectedValue)
elif state_representation == "test_case_xml":
- return dynamic_model_information_from_py_type(self, TestCollectionDefDict)
+ return dynamic_model_information_from_py_type(self, JsonTestCollectionDefDict)
else:
raise NotImplementedError(
f"Have not implemented data collection parameter models for state representation {state_representation}"
@@ -1164,12 +1165,11 @@ class ToolParameterModel(RootModel):
class ToolParameterBundle(Protocol):
"""An object having a dictionary of input models (i.e. a 'Tool')"""
- # TODO: rename to parameters to align with ConditionalWhen and Repeat.
- input_models: List[ToolParameterT]
+ parameters: List[ToolParameterT]
class ToolParameterBundleModel(BaseModel):
- input_models: List[ToolParameterT]
+ parameters: List[ToolParameterT]
def to_simple_model(input_parameter: Union[ToolParameterModel, ToolParameterT]) -> ToolParameterT:
@@ -1180,10 +1180,8 @@ def to_simple_model(input_parameter: Union[ToolParameterModel, ToolParameterT])
return cast(ToolParameterT, input_parameter)
-def simple_input_models(
- input_models: Union[List[ToolParameterModel], List[ToolParameterT]]
-) -> Iterable[ToolParameterT]:
- return [to_simple_model(m) for m in input_models]
+def simple_input_models(parameters: Union[List[ToolParameterModel], List[ToolParameterT]]) -> Iterable[ToolParameterT]:
+ return [to_simple_model(m) for m in parameters]
def create_model_strict(*args, **kwd) -> Type[BaseModel]:
@@ -1194,27 +1192,27 @@ def create_model_strict(*args, **kwd) -> Type[BaseModel]:
def create_request_model(tool: ToolParameterBundle, name: str = "DynamicModelForTool") -> Type[BaseModel]:
- return create_field_model(tool.input_models, name, "request")
+ return create_field_model(tool.parameters, name, "request")
def create_request_internal_model(tool: ToolParameterBundle, name: str = "DynamicModelForTool") -> Type[BaseModel]:
- return create_field_model(tool.input_models, name, "request_internal")
+ return create_field_model(tool.parameters, name, "request_internal")
def create_job_internal_model(tool: ToolParameterBundle, name: str = "DynamicModelForTool") -> Type[BaseModel]:
- return create_field_model(tool.input_models, name, "job_internal")
+ return create_field_model(tool.parameters, name, "job_internal")
def create_test_case_model(tool: ToolParameterBundle, name: str = "DynamicModelForTool") -> Type[BaseModel]:
- return create_field_model(tool.input_models, name, "test_case_xml")
+ return create_field_model(tool.parameters, name, "test_case_xml")
def create_workflow_step_model(tool: ToolParameterBundle, name: str = "DynamicModelForTool") -> Type[BaseModel]:
- return create_field_model(tool.input_models, name, "workflow_step")
+ return create_field_model(tool.parameters, name, "workflow_step")
def create_workflow_step_linked_model(tool: ToolParameterBundle, name: str = "DynamicModelForTool") -> Type[BaseModel]:
- return create_field_model(tool.input_models, name, "workflow_step_linked")
+ return create_field_model(tool.parameters, name, "workflow_step_linked")
def create_field_model(
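A hedged sketch of the renamed bundle plumbing in this module: build parameter models for a tool source and validate a test-case-style state against the generated pydantic model. It assumes the gx_data_collection parameter-specification test tool (a single input named "parameter") and pydantic v2's model_validate.

    from galaxy.tool_util.parameters.factory import input_models_for_tool_source
    from galaxy.tool_util.parameters.models import create_test_case_model
    from galaxy.tool_util.parser.factory import get_tool_source

    tool_source = get_tool_source("test/functional/tools/parameters/gx_data_collection.xml")
    bundle = input_models_for_tool_source(tool_source)  # ToolParameterBundleModel with .parameters
    TestCaseModel = create_test_case_model(bundle)
    # mirrors the test_case_xml_valid entry added to parameter_specification.yml below
    TestCaseModel.model_validate({"parameter": {"class": "Collection", "collection_type": "list", "elements": []}})
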
diff --git a/lib/galaxy/tool_util/parameters/state.py b/lib/galaxy/tool_util/parameters/state.py
index c21163d1e786..94eb48b5de52 100644
--- a/lib/galaxy/tool_util/parameters/state.py
+++ b/lib/galaxy/tool_util/parameters/state.py
@@ -39,8 +39,8 @@ def __init__(self, input_state: Dict[str, Any]):
def _validate(self, pydantic_model: Type[BaseModel]) -> None:
validate_against_model(pydantic_model, self.input_state)
- def validate(self, input_models: HasToolParameters) -> None:
- base_model = self.parameter_model_for(input_models)
+ def validate(self, parameters: HasToolParameters) -> None:
+ base_model = self.parameter_model_for(parameters)
if base_model is None:
raise NotImplementedError(
f"Validating tool state against state representation {self.state_representation} is not implemented."
@@ -53,17 +53,17 @@ def state_representation(self) -> StateRepresentationT:
"""Get state representation of the inputs."""
@classmethod
- def parameter_model_for(cls, input_models: HasToolParameters) -> Type[BaseModel]:
+ def parameter_model_for(cls, parameters: HasToolParameters) -> Type[BaseModel]:
bundle: ToolParameterBundle
- if isinstance(input_models, list):
- bundle = ToolParameterBundleModel(input_models=input_models)
+ if isinstance(parameters, list):
+ bundle = ToolParameterBundleModel(parameters=parameters)
else:
- bundle = input_models
+ bundle = parameters
return cls._parameter_model_for(bundle)
@classmethod
@abstractmethod
- def _parameter_model_for(cls, input_models: ToolParameterBundle) -> Type[BaseModel]:
+ def _parameter_model_for(cls, parameters: ToolParameterBundle) -> Type[BaseModel]:
"""Return a model type for this tool state kind."""
@@ -71,46 +71,46 @@ class RequestToolState(ToolState):
state_representation: Literal["request"] = "request"
@classmethod
- def _parameter_model_for(cls, input_models: ToolParameterBundle) -> Type[BaseModel]:
- return create_request_model(input_models)
+ def _parameter_model_for(cls, parameters: ToolParameterBundle) -> Type[BaseModel]:
+ return create_request_model(parameters)
class RequestInternalToolState(ToolState):
state_representation: Literal["request_internal"] = "request_internal"
@classmethod
- def _parameter_model_for(cls, input_models: ToolParameterBundle) -> Type[BaseModel]:
- return create_request_internal_model(input_models)
+ def _parameter_model_for(cls, parameters: ToolParameterBundle) -> Type[BaseModel]:
+ return create_request_internal_model(parameters)
class JobInternalToolState(ToolState):
state_representation: Literal["job_internal"] = "job_internal"
@classmethod
- def _parameter_model_for(cls, input_models: ToolParameterBundle) -> Type[BaseModel]:
- return create_job_internal_model(input_models)
+ def _parameter_model_for(cls, parameters: ToolParameterBundle) -> Type[BaseModel]:
+ return create_job_internal_model(parameters)
class TestCaseToolState(ToolState):
state_representation: Literal["test_case_xml"] = "test_case_xml"
@classmethod
- def _parameter_model_for(cls, input_models: ToolParameterBundle) -> Type[BaseModel]:
+ def _parameter_model_for(cls, parameters: ToolParameterBundle) -> Type[BaseModel]:
# implement a test case model...
- return create_test_case_model(input_models)
+ return create_test_case_model(parameters)
class WorkflowStepToolState(ToolState):
state_representation: Literal["workflow_step"] = "workflow_step"
@classmethod
- def _parameter_model_for(cls, input_models: ToolParameterBundle) -> Type[BaseModel]:
- return create_workflow_step_model(input_models)
+ def _parameter_model_for(cls, parameters: ToolParameterBundle) -> Type[BaseModel]:
+ return create_workflow_step_model(parameters)
class WorkflowStepLinkedToolState(ToolState):
state_representation: Literal["workflow_step_linked"] = "workflow_step_linked"
@classmethod
- def _parameter_model_for(cls, input_models: ToolParameterBundle) -> Type[BaseModel]:
- return create_workflow_step_linked_model(input_models)
+ def _parameter_model_for(cls, parameters: ToolParameterBundle) -> Type[BaseModel]:
+ return create_workflow_step_linked_model(parameters)
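The same rename viewed from the ToolState side: a hedged sketch of validating a test-case state dict through TestCaseToolState, assuming the gx_data parameter-specification test tool (a single data input named "parameter").

    from galaxy.tool_util.parameters.factory import input_models_for_tool_source
    from galaxy.tool_util.parameters.state import TestCaseToolState
    from galaxy.tool_util.parser.factory import get_tool_source

    tool_source = get_tool_source("test/functional/tools/parameters/gx_data.xml")
    bundle = input_models_for_tool_source(tool_source)
    state = TestCaseToolState({"parameter": {"class": "File", "path": "1.bed"}})
    state.validate(bundle)  # also accepts a bare list of parameter models
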
diff --git a/lib/galaxy/tool_util/parameters/visitor.py b/lib/galaxy/tool_util/parameters/visitor.py
index 363dd9298bc0..5b8e059f2895 100644
--- a/lib/galaxy/tool_util/parameters/visitor.py
+++ b/lib/galaxy/tool_util/parameters/visitor.py
@@ -34,7 +34,7 @@ def visit_input_values(
no_replacement_value=VISITOR_NO_REPLACEMENT,
) -> Dict[str, Any]:
return _visit_input_values(
- simple_input_models(input_models.input_models),
+ simple_input_models(input_models.parameters),
tool_state.input_state,
callback=callback,
no_replacement_value=no_replacement_value,
diff --git a/lib/galaxy/tool_util/parser/interface.py b/lib/galaxy/tool_util/parser/interface.py
index 13052655d5a5..50ec9ed30d17 100644
--- a/lib/galaxy/tool_util/parser/interface.py
+++ b/lib/galaxy/tool_util/parser/interface.py
@@ -8,6 +8,7 @@
from os.path import join
from typing import (
Any,
+ cast,
Dict,
List,
Optional,
@@ -557,7 +558,8 @@ def parse_input_sources(self) -> List[InputSource]:
"""Return a list of InputSource objects."""
-TestCollectionDefElementObject = Union["TestCollectionDefDict", "ToolSourceTestInput"]
+AnyTestCollectionDefDict = Union["JsonTestCollectionDefDict", "XmlTestCollectionDefDict"]
+TestCollectionDefElementObject = Union[AnyTestCollectionDefDict, "ToolSourceTestInput"]
TestCollectionAttributeDict = Dict[str, Any]
CollectionType = str
@@ -567,7 +569,16 @@ class TestCollectionDefElementDict(TypedDict):
element_definition: TestCollectionDefElementObject
-class TestCollectionDefDict(TypedDict):
+class TestCollectionDefElementInternal(TypedDict):
+ element_identifier: str
+ element_definition: Union["TestCollectionDef", "ToolSourceTestInput"]
+
+
+# Two versions of collection inputs can be parsed out. XmlTestCollectionDefDict is the format
+# historically used by tools and Galaxy internals and is exposed in the API via the test
+# definition endpoints for tool execution. JsonTestCollectionDefDict is the format consumed by
+# Planemo and mirrors the CWL way of defining inputs.
+class XmlTestCollectionDefDict(TypedDict):
model_class: Literal["TestCollectionDef"]
attributes: TestCollectionAttributeDict
collection_type: CollectionType
@@ -575,8 +586,96 @@ class TestCollectionDefDict(TypedDict):
name: str
+JsonTestDatasetDefDict = TypedDict(
+ "JsonTestDatasetDefDict",
+ {
+ "class": Literal["File"],
+ "path": NotRequired[Optional[str]],
+ "location": NotRequired[Optional[str]],
+ "name": NotRequired[Optional[str]],
+ "dbkey": NotRequired[Optional[str]],
+ "filetype": NotRequired[Optional[str]],
+ "composite_data": NotRequired[Optional[List[str]]],
+ "tags": NotRequired[Optional[List[str]]],
+ },
+)
+
+JsonTestCollectionDefElementDict = Union[
+ "JsonTestCollectionDefDatasetElementDict", "JsonTestCollectionDefCollectionElementDict"
+]
+JsonTestCollectionDefDatasetElementDict = TypedDict(
+ "JsonTestCollectionDefDatasetElementDict",
+ {
+ "identifier": str,
+ "class": Literal["File"],
+ "path": NotRequired[Optional[str]],
+ "location": NotRequired[Optional[str]],
+ "name": NotRequired[Optional[str]],
+ "dbkey": NotRequired[Optional[str]],
+ "filetype": NotRequired[Optional[str]],
+ "composite_data": NotRequired[Optional[List[str]]],
+ "tags": NotRequired[Optional[List[str]]],
+ },
+)
+BaseJsonTestCollectionDefCollectionElementDict = TypedDict(
+ "BaseJsonTestCollectionDefCollectionElementDict",
+ {
+ "class": Literal["Collection"],
+ "collection_type": str,
+ "elements": NotRequired[Optional[List[JsonTestCollectionDefElementDict]]],
+ },
+)
+JsonTestCollectionDefCollectionElementDict = TypedDict(
+ "JsonTestCollectionDefCollectionElementDict",
+ {
+ "identifier": str,
+ "class": Literal["Collection"],
+ "collection_type": str,
+ "elements": NotRequired[Optional[List[JsonTestCollectionDefElementDict]]],
+ },
+)
+JsonTestCollectionDefDict = TypedDict(
+ "JsonTestCollectionDefDict",
+ {
+ "class": Literal["Collection"],
+ "collection_type": str,
+ "elements": NotRequired[Optional[List[JsonTestCollectionDefElementDict]]],
+ "name": NotRequired[Optional[str]],
+ },
+)
+
+
+def xml_data_input_to_json(xml_input: ToolSourceTestInput) -> "JsonTestDatasetDefDict":
+ attributes = xml_input["attributes"]
+ as_dict: JsonTestDatasetDefDict = {
+ "class": "File",
+ }
+ value = xml_input["value"]
+ if value:
+ as_dict["path"] = value
+ _copy_if_exists(attributes, as_dict, "location")
+ _copy_if_exists(attributes, as_dict, "dbkey")
+ _copy_if_exists(attributes, as_dict, "ftype", "filetype")
+ _copy_if_exists(attributes, as_dict, "composite_data", only_if_value=True)
+ tags = attributes.get("tags")
+ if tags:
+ as_dict["tags"] = [t.strip() for t in tags.split(",")]
+ return as_dict
+
+
+def _copy_if_exists(attributes, as_dict, name: str, as_name: Optional[str] = None, only_if_value: bool = False):
+ if name in attributes:
+ value = attributes[name]
+ if not value and only_if_value:
+ return
+ if as_name is None:
+ as_name = name
+ as_dict[as_name] = value
+
+
class TestCollectionDef:
__test__ = False # Prevent pytest from discovering this class (issue #12071)
+ elements: List[TestCollectionDefElementInternal]
def __init__(self, attrib, name, collection_type, elements):
self.attrib = attrib
@@ -584,7 +683,40 @@ def __init__(self, attrib, name, collection_type, elements):
self.elements = elements
self.name = name
- def to_dict(self) -> TestCollectionDefDict:
+ def _test_format_to_dict(self) -> "BaseJsonTestCollectionDefCollectionElementDict":
+
+ def to_element(xml_element_dict: "TestCollectionDefElementInternal") -> "JsonTestCollectionDefElementDict":
+ identifier = xml_element_dict["element_identifier"]
+ element_object = xml_element_dict["element_definition"]
+ as_dict: JsonTestCollectionDefElementDict
+
+ if isinstance(element_object, TestCollectionDef):
+ as_dict = JsonTestCollectionDefCollectionElementDict(
+ identifier=identifier, **element_object._test_format_to_dict()
+ )
+ else:
+ as_dict = JsonTestCollectionDefDatasetElementDict(
+ identifier=identifier,
+ **xml_data_input_to_json(cast(ToolSourceTestInput, element_object)),
+ )
+ return as_dict
+
+ test_format_dict = BaseJsonTestCollectionDefCollectionElementDict(
+ {
+ "class": "Collection",
+ "elements": list(map(to_element, self.elements)),
+ "collection_type": self.collection_type,
+ }
+ )
+ return test_format_dict
+
+ def test_format_to_dict(self) -> JsonTestCollectionDefDict:
+ test_format_dict = JsonTestCollectionDefDict(**self._test_format_to_dict())
+ if self.name:
+ test_format_dict["name"] = self.name
+ return test_format_dict
+
+ def to_dict(self) -> XmlTestCollectionDefDict:
def element_to_dict(element_dict):
element_identifier, element_def = element_dict["element_identifier"], element_dict["element_definition"]
if isinstance(element_def, TestCollectionDef):
@@ -603,23 +735,53 @@ def element_to_dict(element_dict):
}
@staticmethod
- def from_dict(as_dict: TestCollectionDefDict):
- assert as_dict["model_class"] == "TestCollectionDef"
-
- def element_from_dict(element_dict):
- if "element_definition" not in element_dict:
- raise Exception(f"Invalid element_dict {element_dict}")
- element_def = element_dict["element_definition"]
- if element_def.get("model_class", None) == "TestCollectionDef":
- element_def = TestCollectionDef.from_dict(element_def)
- return {"element_identifier": element_dict["element_identifier"], "element_definition": element_def}
-
- return TestCollectionDef(
- attrib=as_dict["attributes"],
- name=as_dict["name"],
- elements=list(map(element_from_dict, as_dict["elements"] or [])),
- collection_type=as_dict["collection_type"],
- )
+ def from_dict(
+ as_dict: Union[AnyTestCollectionDefDict, JsonTestCollectionDefCollectionElementDict]
+ ) -> "TestCollectionDef":
+ if "model_class" in as_dict:
+ xml_as_dict = cast(XmlTestCollectionDefDict, as_dict)
+ assert xml_as_dict["model_class"] == "TestCollectionDef"
+
+ def element_from_dict(element_dict) -> TestCollectionDefElementInternal:
+ if "element_definition" not in element_dict:
+ raise Exception(f"Invalid element_dict {element_dict}")
+ element_def = element_dict["element_definition"]
+ if element_def.get("model_class", None) == "TestCollectionDef":
+ element_def = TestCollectionDef.from_dict(element_def)
+ return {"element_identifier": element_dict["element_identifier"], "element_definition": element_def}
+
+ return TestCollectionDef(
+ attrib=xml_as_dict["attributes"],
+ name=xml_as_dict.get("name", "Unnamed Collection"),
+ elements=list(map(element_from_dict, xml_as_dict["elements"] or [])),
+ collection_type=xml_as_dict["collection_type"],
+ )
+ else:
+ json_as_dict = cast(JsonTestCollectionDefDict, as_dict)
+
+ def element_from_dict_json(
+ element_dict: JsonTestCollectionDefElementDict,
+ ) -> TestCollectionDefElementInternal:
+ element_class = element_dict.get("class")
+ identifier = element_dict["identifier"]
+ element_def: Union[TestCollectionDef, ToolSourceTestInput]
+ if element_class == "Collection":
+ collection_element_dict = cast(JsonTestCollectionDefCollectionElementDict, element_dict)
+ element_def = TestCollectionDef.from_dict(collection_element_dict)
+ else:
+ dataset_element_dict = cast(JsonTestCollectionDefDatasetElementDict, element_dict)
+                    value = dataset_element_dict["path"]  # TODO: handle location
+ name = dataset_element_dict.get("name") or "Unnamed Collection"
+ element_def = {"name": name, "value": value, "attributes": {}}
+ return TestCollectionDefElementInternal(element_identifier=identifier, element_definition=element_def)
+
+ elements = list(map(element_from_dict_json, json_as_dict.get("elements") or []))
+ return TestCollectionDef(
+ attrib={},
+ name=json_as_dict.get("name") or "Unnamed Collection",
+ elements=elements,
+ collection_type=json_as_dict["collection_type"],
+ )
def collect_inputs(self):
inputs = []
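A small sketch of the two dict formats distinguished above: from_dict now recognizes the Planemo/CWL-style JSON format (no "model_class" key) in addition to the historical XML-derived format, and test_format_to_dict serializes back to the JSON style. The identifiers and paths are hypothetical.

    from galaxy.tool_util.parser.interface import TestCollectionDef

    json_style = {
        "class": "Collection",
        "collection_type": "paired",
        "elements": [
            {"identifier": "forward", "class": "File", "path": "1_f.bed"},
            {"identifier": "reverse", "class": "File", "path": "1_r.bed"},
        ],
    }
    collection = TestCollectionDef.from_dict(json_style)
    assert collection.collection_type == "paired"
    assert collection.elements[0]["element_identifier"] == "forward"
    round_tripped = collection.test_format_to_dict()
    assert round_tripped["elements"][0]["path"] == "1_f.bed"
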
diff --git a/lib/galaxy/tool_util/parser/xml.py b/lib/galaxy/tool_util/parser/xml.py
index acfb3342443a..c61b178e64c7 100644
--- a/lib/galaxy/tool_util/parser/xml.py
+++ b/lib/galaxy/tool_util/parser/xml.py
@@ -46,7 +46,6 @@
PageSource,
PagesSource,
RequiredFiles,
- TestCollectionDefDict,
TestCollectionDefElementDict,
TestCollectionDefElementObject,
TestCollectionOutputDef,
@@ -59,6 +58,7 @@
ToolSourceTestOutputAttributes,
ToolSourceTestOutputs,
ToolSourceTests,
+ XmlTestCollectionDefDict,
XrefDict,
)
from .output_actions import ToolOutputActionGroup
@@ -1011,7 +1011,7 @@ def __parse_inputs_elems(test_elem, i) -> ToolSourceTestInputs:
return raw_inputs
-def _test_collection_def_dict(elem: Element) -> TestCollectionDefDict:
+def _test_collection_def_dict(elem: Element) -> XmlTestCollectionDefDict:
elements: List[TestCollectionDefElementDict] = []
attrib: Dict[str, Any] = _element_to_dict(elem)
collection_type = attrib["type"]
@@ -1027,7 +1027,7 @@ def _test_collection_def_dict(elem: Element) -> TestCollectionDefDict:
element_definition = __parse_param_elem(element)
elements.append({"element_identifier": element_identifier, "element_definition": element_definition})
- return TestCollectionDefDict(
+ return XmlTestCollectionDefDict(
model_class="TestCollectionDef",
attributes=attrib,
collection_type=collection_type,
diff --git a/lib/galaxy/tool_util/unittest_utils/parameters.py b/lib/galaxy/tool_util/unittest_utils/parameters.py
index 07ec28ffcb70..906dc4e5cfcd 100644
--- a/lib/galaxy/tool_util/unittest_utils/parameters.py
+++ b/lib/galaxy/tool_util/unittest_utils/parameters.py
@@ -17,7 +17,7 @@
class ParameterBundle(ToolParameterBundle):
def __init__(self, parameter: ToolParameterT):
- self.input_models = [parameter]
+ self.parameters = [parameter]
def parameter_bundle(parameter: ToolParameterT) -> ParameterBundle:
diff --git a/lib/galaxy/tool_util/verify/interactor.py b/lib/galaxy/tool_util/verify/interactor.py
index 6a233196a08a..9e1dd5bf87d4 100644
--- a/lib/galaxy/tool_util/verify/interactor.py
+++ b/lib/galaxy/tool_util/verify/interactor.py
@@ -38,10 +38,10 @@
from galaxy.tool_util.parser.interface import (
AssertionList,
TestCollectionDef,
- TestCollectionDefDict,
TestCollectionOutputDef,
TestSourceTestOutputColllection,
ToolSourceTestOutputs,
+ XmlTestCollectionDefDict,
)
from galaxy.util import requests
from galaxy.util.bunch import Bunch
@@ -588,7 +588,9 @@ def _ensure_valid_location_in(self, test_data: dict) -> Optional[str]:
raise ValueError(f"Invalid `location` URL: `{location}`")
return location
- def run_tool(self, testdef, history_id, resource_parameters=None) -> RunToolResponse:
+ def run_tool(
+ self, testdef: "ToolTestDescription", history_id: str, resource_parameters: Optional[Dict[str, Any]] = None
+ ) -> RunToolResponse:
# We need to handle the case where we've uploaded a valid compressed file since the upload
# tool will have uncompressed it on the fly.
resource_parameters = resource_parameters or {}
@@ -1754,7 +1756,7 @@ def expanded_inputs_from_json(expanded_inputs_json: ExpandedToolInputsJsonified)
loaded_inputs: ExpandedToolInputs = {}
for key, value in expanded_inputs_json.items():
if isinstance(value, dict) and value.get("model_class"):
- collection_def_dict = cast(TestCollectionDefDict, value)
+ collection_def_dict = cast(XmlTestCollectionDefDict, value)
loaded_inputs[key] = TestCollectionDef.from_dict(collection_def_dict)
else:
loaded_inputs[key] = value
diff --git a/lib/galaxy/tool_util/verify/parse.py b/lib/galaxy/tool_util/verify/parse.py
index 52f1a9af052b..a3aee97eef0c 100644
--- a/lib/galaxy/tool_util/verify/parse.py
+++ b/lib/galaxy/tool_util/verify/parse.py
@@ -67,7 +67,7 @@ def parse_tool_test_descriptions(
if validate_on_load:
tool_parameter_bundle = input_models_for_tool_source(tool_source)
try:
- case_state(raw_test_dict, tool_parameter_bundle.input_models, profile, validate=True)
+ case_state(raw_test_dict, tool_parameter_bundle.parameters, profile, validate=True)
except Exception as e:
# TODO: restrict types of validation exceptions a bit probably?
validation_exception = e
diff --git a/test/functional/tools/options_from_metadata_file.xml b/test/functional/tools/options_from_metadata_file.xml
index 87e1863bbb8d..d0ee9114b8be 100644
--- a/test/functional/tools/options_from_metadata_file.xml
+++ b/test/functional/tools/options_from_metadata_file.xml
@@ -22,7 +22,7 @@ echo '${species_2}' >> '${output}'
-
+
diff --git a/test/unit/tool_util/parameter_specification.yml b/test/unit/tool_util/parameter_specification.yml
index da3671057075..8497843a427c 100644
--- a/test/unit/tool_util/parameter_specification.yml
+++ b/test/unit/tool_util/parameter_specification.yml
@@ -533,6 +533,13 @@ gx_data:
# expanded out.
- parameter: {__class__: "Batch", values: [{src: hdca, id: 5}]}
- parameter: {src: hda, id: abcdabcd}
+ test_case_xml_valid:
+ - parameter: {class: File, path: foo.bed}
+ - parameter: {class: File, location: "https://raw.githubusercontent.com/galaxyproject/planemo/7be1bf5b3971a43eaa73f483125bfb8cabf1c440/tests/data/hello.txt"}
+ test_case_xml_invalid:
+ - parameter: foo.bed
+ - parameter: null
+ - {}
workflow_step_valid:
- {}
workflow_step_invalid:
@@ -592,6 +599,11 @@ gx_data_optional:
- {src: hda, id: 7}
- parameter: {__class__: "Batch", values: [{src: hdca, id: 5}]}
- parameter: {__class__: 'ConnectedValueX'}
+ test_case_xml_valid:
+ - {}
+ - parameter: {class: "File", path: "1.bed"}
+ test_case_xml_invalid:
+ - parameter: {class: "NotAFile", path: "1.bed"}
gx_data_multiple:
request_valid:
@@ -690,6 +702,49 @@ gx_data_collection:
- parameter: {__class__: 'ConnectedValue'}
workflow_step_linked_invalid:
- {}
+ test_case_xml_valid:
+ - parameter: {class: Collection, collection_type: list, elements: []}
+ - parameter:
+ class: Collection
+ collection_type: list
+ elements:
+ - {identifier: "first", path: "1.bed", class: File}
+ - {identifier: "second", path: "2.bed", class: File}
+ - parameter:
+      name: "A nested list with a name"
+ class: Collection
+ collection_type: "list:paired"
+ elements:
+ - class: Collection
+ collection_type: paired
+ identifier: first_el
+ elements:
+ - {identifier: "forward", path: "1_f.bed", class: File}
+ - {identifier: "reverse", path: "1_r.bed", class: File}
+ test_case_xml_invalid:
+ # collection type is required
+ - parameter: {class: Collection, elements: []}
+ - parameter:
+ class: Collection
+ collection_type: list
+ elements:
+ - {identifier: "first", path: "1.bed", class: NotAFile}
+ - parameter:
+ class: Collection
+ collection_type: list
+ elements:
+ - {identifier: "first", pathmisspelled: "1.bed", class: File}
+ - parameter:
+      name: "A nested list with a name"
+ class: Collection
+ collection_type: "list:paired"
+ elements:
+ - class: Collection
+ collection_type: paired
+ identifier: first_el
+ elements:
+ - {identifier: "forward", path: "1_f.bed", class: File}
+ - {identifier: "reverse", path: "1_r.bed", class: FileX}
gx_data_collection_optional:
request_valid:
@@ -995,11 +1050,11 @@ gx_data_column:
request_internal_invalid:
- { ref_parameter: {src: hda, id: 123}, parameter: "0" }
test_case_xml_valid:
- - { ref_parameter: "1.bed", parameter: 3 }
+ - { ref_parameter: {class: "File", path: "1.bed"}, parameter: 3 }
test_case_xml_invalid:
- - { ref_parameter: "1.bed", parameter: "3" }
+ - { ref_parameter: {class: "File", path: "1.bed"}, parameter: "3" }
test_case_xml_invalid:
- - { ref_parameter: "1.bed", parameter: "c2: With name" }
+ - { ref_parameter: {class: "File", path: "1.bed"}, parameter: "c2: With name" }
gx_data_column_optional:
request_valid:
diff --git a/test/unit/tool_util/test_parameter_test_cases.py b/test/unit/tool_util/test_parameter_test_cases.py
index aa5db0319259..909c0bfd2da5 100644
--- a/test/unit/tool_util/test_parameter_test_cases.py
+++ b/test/unit/tool_util/test_parameter_test_cases.py
@@ -8,13 +8,18 @@
from galaxy.tool_util.models import parse_tool
from galaxy.tool_util.parameters.case import (
test_case_state as case_state,
+ TestCaseStateAndWarnings,
TestCaseStateValidationResult,
validate_test_cases_for_tool_source,
)
from galaxy.tool_util.parser.factory import get_tool_source
-from galaxy.tool_util.parser.interface import ToolSourceTest
+from galaxy.tool_util.parser.interface import (
+ ToolSource,
+ ToolSourceTest,
+)
from galaxy.tool_util.unittest_utils import functional_test_tool_directory
from galaxy.tool_util.verify.parse import parse_tool_test_descriptions
+from .util import dict_verify_each
# legacy tools allow specifying parameter and repeat parameters without
# qualification. This was problematic and could result in ambiguous specifications.
@@ -116,6 +121,73 @@ def test_validate_framework_test_tools():
raise Exception(f"Failed to validate {tool_path}: {str(e)}")
+def test_test_case_state_conversion():
+ tool_source = tool_source_for("collection_nested_test")
+ test_cases: List[ToolSourceTest] = tool_source.parse_tests_to_dict()["tests"]
+ state = case_state_for(tool_source, test_cases[0])
+ expectations = [
+ (["f1", "collection_type"], "list:paired"),
+ (["f1", "class"], "Collection"),
+ (["f1", "elements", 0, "class"], "Collection"),
+ (["f1", "elements", 0, "collection_type"], "paired"),
+ (["f1", "elements", 0, "elements", 0, "class"], "File"),
+ (["f1", "elements", 0, "elements", 0, "path"], "simple_line.txt"),
+ (["f1", "elements", 0, "elements", 0, "identifier"], "forward"),
+ ]
+ dict_verify_each(state.tool_state.input_state, expectations)
+
+ tool_source = tool_source_for("dbkey_filter_input")
+ test_cases = tool_source.parse_tests_to_dict()["tests"]
+ state = case_state_for(tool_source, test_cases[0])
+ expectations = [
+ (["inputs", "class"], "File"),
+ (["inputs", "dbkey"], "hg19"),
+ ]
+ dict_verify_each(state.tool_state.input_state, expectations)
+
+ tool_source = tool_source_for("discover_metadata_files")
+ test_cases = tool_source.parse_tests_to_dict()["tests"]
+ state = case_state_for(tool_source, test_cases[0])
+ expectations = [
+ (["input_bam", "class"], "File"),
+ (["input_bam", "filetype"], "bam"),
+ ]
+ dict_verify_each(state.tool_state.input_state, expectations)
+
+ tool_source = tool_source_for("remote_test_data_location")
+ test_cases = tool_source.parse_tests_to_dict()["tests"]
+ state = case_state_for(tool_source, test_cases[0])
+ expectations = [
+ (["input", "class"], "File"),
+ (
+ ["input", "location"],
+ "https://raw.githubusercontent.com/galaxyproject/planemo/7be1bf5b3971a43eaa73f483125bfb8cabf1c440/tests/data/hello.txt",
+ ),
+ ]
+ dict_verify_each(state.tool_state.input_state, expectations)
+
+ tool_source = tool_source_for("composite")
+ test_cases = tool_source.parse_tests_to_dict()["tests"]
+ state = case_state_for(tool_source, test_cases[0])
+ expectations = [
+ (["input", "class"], "File"),
+ (["input", "filetype"], "velvet"),
+ (["input", "composite_data", 0], "velveth_test1/Sequences"),
+ ]
+ dict_verify_each(state.tool_state.input_state, expectations)
+
+ tool_source = tool_source_for("parameters/gx_group_tag")
+ test_cases = tool_source.parse_tests_to_dict()["tests"]
+ state = case_state_for(tool_source, test_cases[0])
+ expectations = [
+ (["ref_parameter", "class"], "Collection"),
+ (["ref_parameter", "collection_type"], "paired"),
+ (["ref_parameter", "elements", 0, "identifier"], "forward"),
+ (["ref_parameter", "elements", 0, "tags", 0], "group:type:single"),
+ ]
+ dict_verify_each(state.tool_state.input_state, expectations)
+
+
def _validate_path(tool_path: str):
tool_source = get_tool_source(tool_path)
parsed_tool = parse_tool(tool_source)
@@ -130,7 +202,17 @@ def _validate_path(tool_path: str):
def validate_test_cases_for(tool_name: str, **kwd) -> List[TestCaseStateValidationResult]:
+ return validate_test_cases_for_tool_source(tool_source_for(tool_name), **kwd)
+
+
+def case_state_for(tool_source: ToolSource, test_case: ToolSourceTest) -> TestCaseStateAndWarnings:
+ parsed_tool = parse_tool(tool_source)
+ profile = tool_source.parse_profile()
+ return case_state(test_case, parsed_tool.inputs, profile)
+
+
+def tool_source_for(tool_name: str) -> ToolSource:
test_tool_directory = functional_test_tool_directory()
tool_path = os.path.join(test_tool_directory, f"{tool_name}.xml")
tool_source = get_tool_source(tool_path)
- return validate_test_cases_for_tool_source(tool_source, **kwd)
+ return tool_source
diff --git a/test/unit/tool_util/test_test_definition_parsing.py b/test/unit/tool_util/test_test_definition_parsing.py
index f7c2c3bd28b9..6576660c3b00 100644
--- a/test/unit/tool_util/test_test_definition_parsing.py
+++ b/test/unit/tool_util/test_test_definition_parsing.py
@@ -1,6 +1,5 @@
"""Tool test parsing to dicts logic."""
-import json
import os
from typing import (
Any,
@@ -17,6 +16,7 @@
in_packages,
)
from galaxy.util.unittest import TestCase
+from .util import dict_verify_each
# Not the whole response, just some keys and such to test...
SIMPLE_CONSTRUCTS_EXPECTATIONS_0 = [
@@ -117,18 +117,6 @@ def test_bigwigtowig_converter(self):
self._verify_each(test_dicts[1].to_dict(), BIGWIG_TO_WIG_EXPECTATIONS)
def _verify_each(self, target_dict: dict, expectations: List[Any]):
- assert_json_encodable(target_dict)
- for path, expectation in expectations:
- exception = target_dict.get("exception")
- assert not exception, f"Test failed to generate with exception {exception}"
- self._verify(target_dict, path, expectation)
-
- def _verify(self, target_dict: dict, expectation_path: List[str], expectation: Any):
- rest = target_dict
- for path_part in expectation_path:
- rest = rest[path_part]
- assert rest == expectation, f"{rest} != {expectation} for {expectation_path}"
-
-
-def assert_json_encodable(as_dict: dict):
- json.dumps(as_dict)
+ exception = target_dict.get("exception")
+ assert not exception, f"Test failed to generate with exception {exception}"
+ dict_verify_each(target_dict, expectations)
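For reference, a short sketch of the shared helper this test now uses (defined in test/unit/tool_util/util.py below), as it would be called from another module in the same test package; the state dict is hypothetical.

    from .util import dict_verify_each

    state = {"input": {"class": "File", "filetype": "bam", "tags": ["group:type:single"]}}
    dict_verify_each(
        state,
        [
            (["input", "class"], "File"),
            (["input", "filetype"], "bam"),
            (["input", "tags", 0], "group:type:single"),
        ],
    )
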
diff --git a/test/unit/tool_util/util.py b/test/unit/tool_util/util.py
index c94345b29f54..b03aff0fc15f 100644
--- a/test/unit/tool_util/util.py
+++ b/test/unit/tool_util/util.py
@@ -1,11 +1,35 @@
+import json
from contextlib import contextmanager
from os import environ
+from typing import (
+ Any,
+ List,
+)
import pytest
external_dependency_management = pytest.mark.external_dependency_management
+def dict_verify_each(target_dict: dict, expectations: List[Any]):
+ assert_json_encodable(target_dict)
+ for path, expectation in expectations:
+ exception = target_dict.get("exception")
+ assert not exception, f"Test failed to generate with exception {exception}"
+ dict_verify(target_dict, path, expectation)
+
+
+def dict_verify(target_dict: dict, expectation_path: List[str], expectation: Any):
+ rest = target_dict
+ for path_part in expectation_path:
+ rest = rest[path_part]
+ assert rest == expectation, f"{rest} != {expectation} for {expectation_path}"
+
+
+def assert_json_encodable(as_dict: dict):
+ json.dumps(as_dict)
+
+
@contextmanager
def modify_environ(values, keys_to_remove=None):
"""