From a8efcf83f9e6f6823775295384f7299aa842537f Mon Sep 17 00:00:00 2001
From: Zhen Wang
Date: Thu, 25 Jul 2024 22:02:48 +0200
Subject: [PATCH 1/3] Replace Union with |.

Replace typing.Union with the PEP 604 union operator (`Union[X, Y]`
becomes `X | Y`), and replace the typing.Dict/List/Tuple aliases,
deprecated since Python 3.9, with the builtin dict/list/tuple generics
(PEP 585) throughout the package, documentation and tests.
typing.Optional and the remaining aliases (Set, Type) are left as-is.

Signed-off-by: Zhen Wang
---
 code_generation/code_gen.py                   |   5 +-
 code_generation/meta_data.py                  |  20 +--
 .../core/dataset_class_maps.py.jinja          |   6 +-
 docs/examples/Validation Examples.ipynb       |  10 +-
 docs/user_manual/data-validator.md            |  14 +-
 setup.py                                      |  14 +-
 src/power_grid_model/_utils.py                |  10 +-
 src/power_grid_model/core/buffer_handling.py  |  16 +--
 src/power_grid_model/core/data_handling.py    |   8 +-
 .../core/dataset_definitions.py               |   6 +-
 .../core/power_grid_dataset.py                |  34 ++---
 src/power_grid_model/core/power_grid_meta.py  |  16 +--
 src/power_grid_model/core/power_grid_model.py |  85 +++++-------
 src/power_grid_model/core/serialization.py    |  38 ++----
 src/power_grid_model/data_types.py            |  34 ++---
 src/power_grid_model/errors.py                |   5 +-
 src/power_grid_model/validation/assertions.py |   4 +-
 src/power_grid_model/validation/errors.py     |  56 ++++----
 src/power_grid_model/validation/rules.py      | 120 +++++++++---------
 src/power_grid_model/validation/utils.py      |  16 +--
 src/power_grid_model/validation/validation.py |  64 +++++-----
 tests/unit/test_0Z_model_validation.py        |  12 +-
 tests/unit/test_power_grid_model.py           |   1 -
 tests/unit/test_serialization.py              |   6 +-
 tests/unit/utils.py                           |  10 +-
 .../unit/validation/test_batch_validation.py  |   5 +-
 .../unit/validation/test_input_validation.py  |   3 +-
 .../validation/test_validation_functions.py   |   3 +-
 28 files changed, 285 insertions(+), 336 deletions(-)

diff --git a/code_generation/code_gen.py b/code_generation/code_gen.py
index 32ec49a3d..dbbef5c55 100644
--- a/code_generation/code_gen.py
+++ b/code_generation/code_gen.py
@@ -4,7 +4,6 @@
 
 from copy import deepcopy
 from pathlib import Path
-from typing import Dict, List
 
 from jinja2 import Environment, FileSystemLoader
 from meta_data import AllDatasetMapData, AttributeClass, DatasetMapData, DatasetMetaData
@@ -28,7 +27,7 @@ def _data_type_nan(data_type: str):
 
 
 class CodeGenerator:
-    all_classes: Dict[str, AttributeClass]
+    all_classes: dict[str, AttributeClass]
     base_output_path: Path
 
     def __init__(self, base_output_path: Path) -> None:
@@ -98,7 +97,7 @@ def render_attribute_classes(self, template_path: Path, data_path: Path, output_
     def render_dataset_class_maps(self, template_path: Path, data_path: Path, output_path: Path):
         with open(data_path) as data_file:
             json_data = data_file.read()
-        dataset_meta_data: List[DatasetMapData] = AllDatasetMapData.schema().loads(json_data).all_datasets
+        dataset_meta_data: list[DatasetMapData] = AllDatasetMapData.schema().loads(json_data).all_datasets
 
         # create list
         all_map = {}
diff --git a/code_generation/meta_data.py b/code_generation/meta_data.py
index 867764e5b..17b426c7a 100644
--- a/code_generation/meta_data.py
+++ b/code_generation/meta_data.py
@@ -5,7 +5,7 @@
 # define dataclass for meta data
 
 from dataclasses import dataclass
-from typing import Dict, List, Optional, Union
+from typing import Optional
 
 from dataclasses_json import DataClassJsonMixin
 
@@ -13,7 +13,7 @@
 @dataclass
 class Attribute(DataClassJsonMixin):
     data_type: str
-    names: Union[str, List[str]]
+    names: str | list[str]
     description: str
     nan_value: Optional[str] = None
 
@@ -21,25 +21,25 @@
 @dataclass
 class AttributeClass(DataClassJsonMixin):
     name: str
-    attributes: List[Attribute]
-    full_attributes: Optional[List[Attribute]] = None
+    attributes: list[Attribute]
+    full_attributes: Optional[list[Attribute]] = None
     base: 
Optional[str] = None is_template: bool = False full_name: Optional[str] = None - specification_names: Optional[List[str]] = None - base_attributes: Optional[Dict[str, List[Attribute]]] = None + specification_names: Optional[list[str]] = None + base_attributes: Optional[dict[str, list[Attribute]]] = None @dataclass class DatasetMetaData(DataClassJsonMixin): name: str include_guard: str - classes: List[AttributeClass] + classes: list[AttributeClass] @dataclass class ObjectMapData(DataClassJsonMixin): - names: List[str] + names: list[str] class_name: str @@ -47,9 +47,9 @@ class ObjectMapData(DataClassJsonMixin): class DatasetMapData(DataClassJsonMixin): name: str is_template: bool - components: List[ObjectMapData] + components: list[ObjectMapData] @dataclass class AllDatasetMapData(DataClassJsonMixin): - all_datasets: List[DatasetMapData] + all_datasets: list[DatasetMapData] diff --git a/code_generation/templates/src/power_grid_model/core/dataset_class_maps.py.jinja b/code_generation/templates/src/power_grid_model/core/dataset_class_maps.py.jinja index f6b450aa5..2c0693186 100644 --- a/code_generation/templates/src/power_grid_model/core/dataset_class_maps.py.jinja +++ b/code_generation/templates/src/power_grid_model/core/dataset_class_maps.py.jinja @@ -10,7 +10,7 @@ {%- set components = all_map['input'].keys() %} from enum import Enum, EnumMeta -from typing import Any, Dict, Mapping +from typing import Any, Mapping # pylint: disable=invalid-name @@ -68,7 +68,7 @@ def _str_to_datatype(data_type: Any) -> DatasetType: return data_type -def _map_to_datatypes(data: Mapping[Any, Any]) -> Dict[DatasetType, Any]: +def _map_to_datatypes(data: Mapping[Any, Any]) -> dict[DatasetType, Any]: """Helper function to map datatype str keys to DatasetType.""" return {_str_to_datatype(key): value for key, value in data.items()} @@ -80,7 +80,7 @@ def _str_to_component_type(component: Any) -> ComponentType: return component -def _map_to_component_types(data: Mapping[Any, Any]) -> Dict[ComponentType, Any]: +def _map_to_component_types(data: Mapping[Any, Any]) -> dict[ComponentType, Any]: """Helper function to map componenttype str keys to ComponentType.""" return {_str_to_component_type(key): value for key, value in data.items()} diff --git a/docs/examples/Validation Examples.ipynb b/docs/examples/Validation Examples.ipynb index cf3d4c0d7..aecc4c70f 100644 --- a/docs/examples/Validation Examples.ipynb +++ b/docs/examples/Validation Examples.ipynb @@ -21,8 +21,8 @@ "# Manual validation\n", "# validate_input_data() assumes that you won't be using update data in your calculation.\n", "# validate_batch_data() validates input_data in combination with batch/update data.\n", - "validate_input_data(input_data, calculation_type, symmetric) -> List[ValidationError]\n", - "validate_batch_data(input_data, update_data, calculation_type, symmetric) -> Dict[int, List[ValidationError]]\n", + "validate_input_data(input_data, calculation_type, symmetric) -> list[ValidationError]\n", + "validate_batch_data(input_data, update_data, calculation_type, symmetric) -> dict[int, list[ValidationError]]\n", "\n", "# Assertions\n", "# assert_valid_input_data() and assert_valid_batch_data() raise a ValidationException,\n", @@ -42,13 +42,13 @@ "class ValidationError:\n", " \n", " # Component(s): e.g. ComponentType.node or [ComponentType.node, ComponentType.line]\n", - " component: Union[ComponentType, List[ComponentType]]\n", + " component: ComponentType | list[ComponentType]\n", " \n", " # Field(s): e.g. 
\"id\" or [\"line_from\", \"line_to\"] or [(ComponentType.node, \"id\"), (ComponentType.line, \"id\")]\n", - " field: Union[str, List[str], List[Tuple[ComponentType, str]]]\n", + " field: str | list[str] | list[Tuple[ComponentType, str]]\n", "\n", " # IDs: e.g. [1, 2, 3] or [(ComponentType.node, 1), (ComponentType.line, 1)]\n", - " ids: Union[List[int], List[Tuple[ComponentType, int]]] = [] \n", + " ids: list[int] | list[Tuple[ComponentType, int]] = [] \n", " \n", "```\n", "\n", diff --git a/docs/user_manual/data-validator.md b/docs/user_manual/data-validator.md index 60e63d402..2fa480225 100644 --- a/docs/user_manual/data-validator.md +++ b/docs/user_manual/data-validator.md @@ -15,8 +15,8 @@ Two helper type definitions are used throughout the validation functions, `Input special types or classes, but merely type hinting aliases: ```python -InputData = Dict[str, np.ndarray] -UpdateData = Dict[str, Union[np.ndarray, Dict[str, np.ndarray]]] +InputData = dict[str, np.ndarray] +UpdateData = dict[str, np.ndarray | dict[str, np.ndarray]] ``` ```{seealso} @@ -33,13 +33,13 @@ e.g. which object IDs are involved. class ValidationError: # Component(s): e.g. "node" or ["node", "line"] - component: Union[str, List[str]] + component: str | list[str] # Field(s): e.g. "id" or ["line_from", "line_to"] or [("node", "id"), ("line", "id")] - field: Union[str, List[str], List[Tuple[str, str]]] + field: str | list[str] | list[Tuple[str, str]] # IDs: e.g. [1, 2, 3] or [("node", 1), ("line", 1)] - ids: Union[List[int], List[Tuple[str, int]]] = [] + ids: list[int] | list[Tuple[str, int]] = [] ``` ## Validation functions @@ -51,8 +51,8 @@ and `symmetric: bool`, stating if the data will be used for symmetric or asymmet allow missing values for unused fields; see the [API reference](../api_reference/python-api-reference.md#enum) for more information. To validate update/batch data `update_data: UpdateData`, power-grid-model update data, should also be supplied. -- `validate_input_data(input_data, calculation_type, symmetric) -> List[ValidationError]` validates input_data. -- `validate_batch_data(input_data, update_data, calculation_type, symmetric) -> Dict[int, List[ValidationError]]` validates input_data in combination with batch/update data. +- `validate_input_data(input_data, calculation_type, symmetric) -> list[ValidationError]` validates input_data. +- `validate_batch_data(input_data, update_data, calculation_type, symmetric) -> dict[int, list[ValidationError]]` validates input_data in combination with batch/update data. ```{note} When doing [batch calculations](./calculations.md#batch-calculations), the input data set by itself is not required to be valid, as long as all missing and invalid values are overwritten in all scenarios in the batch data set. diff --git a/setup.py b/setup.py index 08dee9fee..b5b85e687 100644 --- a/setup.py +++ b/setup.py @@ -23,7 +23,7 @@ raise SystemError("Only Windows, Linux, or MacOS is supported!") -def get_required_dependency_include() -> List[str]: +def get_required_dependency_include() -> list[str]: """ Get build requirements includes. 
@@ -39,7 +39,7 @@ def get_required_dependency_include() -> List[str]: return [] -def get_pre_installed_header_include() -> List[str]: +def get_pre_installed_header_include() -> list[str]: """ Get header files from pybuild_header_dependency, if it is installed @@ -55,7 +55,7 @@ def get_pre_installed_header_include() -> List[str]: return [] -def get_conda_include() -> List[str]: +def get_conda_include() -> list[str]: """ Get conda include path, if we are inside conda environment @@ -151,10 +151,10 @@ def generate_build_ext(pkg_dir: Path, pkg_name: str): include_dirs += get_pre_installed_header_include() include_dirs += get_conda_include() # compiler and link flag - cflags: List[str] = [] - lflags: List[str] = [] - library_dirs: List[str] = [] - libraries: List[str] = [] + cflags: list[str] = [] + lflags: list[str] = [] + library_dirs: list[str] = [] + libraries: list[str] = [] sources = [ str(pgm_c / pgm_c / "src" / "handle.cpp"), str(pgm_c / pgm_c / "src" / "meta_data.cpp"), diff --git a/src/power_grid_model/_utils.py b/src/power_grid_model/_utils.py index 26f035b22..9cbbc1a54 100644 --- a/src/power_grid_model/_utils.py +++ b/src/power_grid_model/_utils.py @@ -11,7 +11,7 @@ """ from copy import deepcopy -from typing import List, Optional, Union, cast +from typing import Optional, cast import numpy as np @@ -101,7 +101,7 @@ def get_and_verify_batch_sizes(batch_data: BatchDataset) -> int: """ n_batch_size = 0 - checked_components: List[ComponentType] = [] + checked_components: list[ComponentType] = [] for component, data in batch_data.items(): n_component_batch_size = get_batch_size(data) if checked_components and n_component_batch_size != n_batch_size: @@ -150,9 +150,7 @@ def get_batch_size(batch_data: BatchArray) -> int: return n_batches -def split_numpy_array_in_batches( - data: Union[DenseBatchArray, SingleArray], component: ComponentType -) -> List[np.ndarray]: +def split_numpy_array_in_batches(data: DenseBatchArray | SingleArray, component: ComponentType) -> list[np.ndarray]: """ Split a single dense numpy array into one or more batches @@ -179,7 +177,7 @@ def split_numpy_array_in_batches( ) -def split_sparse_batches_in_batches(batch_data: SparseBatchArray, component: ComponentType) -> List[np.ndarray]: +def split_sparse_batches_in_batches(batch_data: SparseBatchArray, component: ComponentType) -> list[np.ndarray]: """ Split a single numpy array representing, a compressed sparse structure, into one or more batches diff --git a/src/power_grid_model/core/buffer_handling.py b/src/power_grid_model/core/buffer_handling.py index 82e69581f..4050f0a7a 100644 --- a/src/power_grid_model/core/buffer_handling.py +++ b/src/power_grid_model/core/buffer_handling.py @@ -8,7 +8,7 @@ from dataclasses import dataclass -from typing import Dict, Mapping, Optional, Tuple, Union +from typing import Mapping, Optional import numpy as np @@ -148,12 +148,12 @@ def _get_sparse_buffer_properties(data: Mapping[str, np.ndarray]) -> BufferPrope ) -def get_buffer_properties(data: Union[np.ndarray, Mapping[str, np.ndarray]]) -> BufferProperties: +def get_buffer_properties(data: np.ndarray | Mapping[str, np.ndarray]) -> BufferProperties: """ Extract the properties of the dataset component Args: - data (Union[np.ndarray, Mapping[str, np.ndarray]]): the dataset component. + data (np.ndarray | Mapping[str, np.ndarray]): the dataset component. Raises: ValueError: if the dataset component contains conflicting or bad data. 
@@ -214,7 +214,7 @@ def _get_sparse_buffer_view(data: Mapping[str, np.ndarray], schema: ComponentMet
     )
 
 
-def get_buffer_view(data: Union[np.ndarray, Mapping[str, np.ndarray]], schema: ComponentMetaData) -> CBuffer:
+def get_buffer_view(data: np.ndarray | Mapping[str, np.ndarray], schema: ComponentMetaData) -> CBuffer:
     """
     Get a C API compatible view on a buffer.
 
@@ -233,7 +233,7 @@ def get_buffer_view(data: Union[np.ndarray, Mapping[str, np.ndarray]], schema: C
 
 def create_buffer(
     properties: BufferProperties, schema: ComponentMetaData
-) -> Union[np.ndarray, Dict[ComponentType, np.ndarray]]:
+) -> np.ndarray | dict[ComponentType, np.ndarray]:
     """
     Create a buffer with the provided properties and type.
 
@@ -245,7 +245,7 @@ def create_buffer(
         ValueError: if the buffer properties are not consistent.
 
     Returns:
-        Union[np.ndarray, Dict[str, np.ndarray]]: a buffer with the correct properties.
+        np.ndarray | dict[str, np.ndarray]: a buffer with the correct properties.
     """
     if properties.is_sparse:
         return _create_sparse_buffer(properties=properties, schema=schema)
@@ -270,7 +270,7 @@ def _create_uniform_buffer(properties: BufferProperties, schema: ComponentMetaDa
     if properties.is_sparse:
         raise ValueError(f"A uniform buffer cannot be sparse. {VALIDATOR_MSG}")
 
-    shape: Union[int, Tuple[int, int]] = (
+    shape: int | tuple[int, int] = (
         (properties.batch_size, properties.n_elements_per_scenario)
         if properties.is_batch
         else properties.n_elements_per_scenario
@@ -278,7 +278,7 @@ def _create_uniform_buffer(properties: BufferProperties, schema: ComponentMetaDa
     return np.empty(shape=shape, dtype=schema.dtype)
 
 
-def _create_sparse_buffer(properties: BufferProperties, schema: ComponentMetaData) -> Dict[str, np.ndarray]:
+def _create_sparse_buffer(properties: BufferProperties, schema: ComponentMetaData) -> dict[str, np.ndarray]:
     """
     Create a sparse buffer with the provided properties and type.
 
diff --git a/src/power_grid_model/core/data_handling.py b/src/power_grid_model/core/data_handling.py
index 9d9509806..33608d1e8 100644
--- a/src/power_grid_model/core/data_handling.py
+++ b/src/power_grid_model/core/data_handling.py
@@ -8,7 +8,7 @@
 
 
 from enum import Enum
-from typing import Mapping, Tuple, Union
+from typing import Mapping
 
 import numpy as np
 
@@ -70,9 +70,7 @@ def prepare_input_view(input_data: Mapping[ComponentType, np.ndarray]) -> CConst
     return CConstDataset(input_data, dataset_type=DatasetType.input)
 
 
-def prepare_update_view(
-    update_data: Mapping[ComponentType, Union[np.ndarray, Mapping[str, np.ndarray]]]
-) -> CConstDataset:
+def prepare_update_view(update_data: Mapping[ComponentType, np.ndarray | Mapping[str, np.ndarray]]) -> CConstDataset:
     """
     Create a view of the update data, or an empty view if not provided, in a format compatible with the PGM core
     libary.
@@ -139,7 +137,7 @@ def create_output_data(
     for name, count in all_component_count.items():
         # shape
         if is_batch:
-            shape: Union[Tuple[int], Tuple[int, int]] = (batch_size, count)
+            shape: tuple[int] | tuple[int, int] = (batch_size, count)
         else:
             shape = (count,)
         result_dict[name] = initialize_array(output_type.value, name, shape=shape, empty=True)
diff --git a/src/power_grid_model/core/dataset_definitions.py b/src/power_grid_model/core/dataset_definitions.py
index 3095b53df..d14ebd2df 100644
--- a/src/power_grid_model/core/dataset_definitions.py
+++ b/src/power_grid_model/core/dataset_definitions.py
@@ -7,7 +7,7 @@
 # This file is automatically generated. DO NOT modify it manually!
from enum import Enum, EnumMeta -from typing import Any, Dict, Mapping +from typing import Any, Mapping # pylint: disable=invalid-name @@ -83,7 +83,7 @@ def _str_to_datatype(data_type: Any) -> DatasetType: return data_type -def _map_to_datatypes(data: Mapping[Any, Any]) -> Dict[DatasetType, Any]: +def _map_to_datatypes(data: Mapping[Any, Any]) -> dict[DatasetType, Any]: """Helper function to map datatype str keys to DatasetType.""" return {_str_to_datatype(key): value for key, value in data.items()} @@ -95,7 +95,7 @@ def _str_to_component_type(component: Any) -> ComponentType: return component -def _map_to_component_types(data: Mapping[Any, Any]) -> Dict[ComponentType, Any]: +def _map_to_component_types(data: Mapping[Any, Any]) -> dict[ComponentType, Any]: """Helper function to map componenttype str keys to ComponentType.""" return {_str_to_component_type(key): value for key, value in data.items()} diff --git a/src/power_grid_model/core/power_grid_dataset.py b/src/power_grid_model/core/power_grid_dataset.py index 0f48501f5..fd3453415 100644 --- a/src/power_grid_model/core/power_grid_dataset.py +++ b/src/power_grid_model/core/power_grid_dataset.py @@ -6,7 +6,7 @@ Power grid model raw dataset handler """ -from typing import Any, List, Mapping, Optional, Union +from typing import Any, Mapping, Optional import numpy as np @@ -84,7 +84,7 @@ def n_components(self) -> int: """ return pgc.dataset_info_n_components(self._info) - def components(self) -> List[ComponentType]: + def components(self) -> list[ComponentType]: """ The components in the dataset. @@ -124,7 +124,7 @@ def total_elements(self) -> Mapping[ComponentType, int]: } -def get_dataset_type(data: Mapping[ComponentType, Union[np.ndarray, Mapping[str, np.ndarray]]]) -> DatasetType: +def get_dataset_type(data: Mapping[ComponentType, np.ndarray | Mapping[str, np.ndarray]]) -> DatasetType: """ Deduce the dataset type from the provided dataset. @@ -184,14 +184,11 @@ class CMutableDataset: _is_batch: bool _batch_size: int _mutable_dataset: MutableDatasetPtr - _buffer_views: List[CBuffer] + _buffer_views: list[CBuffer] def __new__( cls, - data: Union[ - Mapping[ComponentType, np.ndarray], - Mapping[ComponentType, Union[np.ndarray, Mapping[str, np.ndarray]]], - ], + data: Mapping[ComponentType, np.ndarray] | Mapping[ComponentType, np.ndarray | Mapping[str, np.ndarray]], dataset_type: Any = None, ): instance = super().__new__(cls) @@ -237,7 +234,7 @@ def get_info(self) -> CDatasetInfo: """ return CDatasetInfo(pgc.dataset_mutable_get_info(self._mutable_dataset)) - def get_buffer_views(self) -> List[CBuffer]: + def get_buffer_views(self) -> list[CBuffer]: """ Get list of buffer views @@ -247,11 +244,7 @@ def get_buffer_views(self) -> List[CBuffer]: return self._buffer_views def _add_data( - self, - data: Union[ - Mapping[ComponentType, np.ndarray], - Mapping[ComponentType, Union[np.ndarray, Mapping[str, np.ndarray]]], - ], + self, data: Mapping[ComponentType, np.ndarray] | Mapping[ComponentType, np.ndarray | Mapping[str, np.ndarray]] ): """ Add Power Grid Model data to the mutable dataset view. 
@@ -270,7 +263,7 @@ def _add_data( def _add_component_data( self, component: ComponentType, - data: Union[np.ndarray, Mapping[str, np.ndarray]], + data: np.ndarray | Mapping[str, np.ndarray], allow_unknown: bool = False, ): """ @@ -307,7 +300,7 @@ def _register_buffer(self, component: ComponentType, buffer: CBuffer): ) assert_no_error() - def _validate_properties(self, data: Union[np.ndarray, Mapping[str, np.ndarray]]): + def _validate_properties(self, data: np.ndarray | Mapping[str, np.ndarray]): properties = get_buffer_properties(data) if properties.is_batch != self._is_batch: raise ValueError( @@ -332,14 +325,11 @@ class CConstDataset: """ _const_dataset: ConstDatasetPtr - _buffer_views: List[CBuffer] + _buffer_views: list[CBuffer] def __new__( cls, - data: Union[ - Mapping[ComponentType, np.ndarray], - Mapping[ComponentType, Union[np.ndarray, Mapping[str, np.ndarray]]], - ], + data: Mapping[ComponentType, np.ndarray] | Mapping[ComponentType, np.ndarray | Mapping[str, np.ndarray]], dataset_type: Optional[DatasetType] = None, ): instance = super().__new__(cls) @@ -428,7 +418,7 @@ def get_data(self) -> Dataset: """ return self._data - def get_component_data(self, component: ComponentType) -> Union[np.ndarray, Mapping[str, np.ndarray]]: + def get_component_data(self, component: ComponentType) -> np.ndarray | Mapping[str, np.ndarray]: """ Retrieve Power Grid Model data from the dataset for a specific component. diff --git a/src/power_grid_model/core/power_grid_meta.py b/src/power_grid_model/core/power_grid_meta.py index dec9ccc28..ce69a0844 100644 --- a/src/power_grid_model/core/power_grid_meta.py +++ b/src/power_grid_model/core/power_grid_meta.py @@ -8,7 +8,7 @@ from dataclasses import dataclass from enum import IntEnum -from typing import Any, Dict, Union +from typing import Any import numpy as np @@ -49,8 +49,8 @@ class ComponentMetaData: """ dtype: np.dtype - dtype_dict: Dict[str, Any] - nans: Dict[str, Union[float, int]] + dtype_dict: dict[str, Any] + nans: dict[str, float | int] nan_scalar: np.ndarray def __getitem__(self, item): @@ -66,8 +66,8 @@ def __getitem__(self, item): return getattr(self, item) -DatasetMetaData = Dict[ComponentType, ComponentMetaData] -PowerGridMetaData = Dict[DatasetType, DatasetMetaData] +DatasetMetaData = dict[ComponentType, ComponentMetaData] +PowerGridMetaData = dict[DatasetType, DatasetMetaData] def _generate_meta_data() -> PowerGridMetaData: @@ -165,9 +165,9 @@ def _generate_meta_attributes(component: ComponentPtr) -> dict: def initialize_array( - data_type: Union[str, DatasetType], - component_type: Union[str, ComponentType], - shape: Union[tuple, int], + data_type: str | DatasetType, + component_type: str | ComponentType, + shape: tuple | int, empty: bool = False, ) -> np.ndarray: """ diff --git a/src/power_grid_model/core/power_grid_model.py b/src/power_grid_model/core/power_grid_model.py index d2c4cc29f..ab0687bb9 100644 --- a/src/power_grid_model/core/power_grid_model.py +++ b/src/power_grid_model/core/power_grid_model.py @@ -6,7 +6,7 @@ Main power grid model class """ from enum import IntEnum -from typing import Dict, List, Optional, Set, Type, Union +from typing import Optional, Set, Type import numpy as np @@ -41,7 +41,7 @@ class PowerGridModel: """ _model_ptr: ModelPtr - _all_component_count: Optional[Dict[ComponentType, int]] + _all_component_count: Optional[dict[ComponentType, int]] _batch_error: Optional[PowerGridBatchError] @property @@ -61,7 +61,7 @@ def _model(self): return self._model_ptr @property - def all_component_count(self) -> 
Dict[ComponentType, int]: + def all_component_count(self) -> dict[ComponentType, int]: """ Get count of number of elements per component type. If the count for a component type is zero, it will not be in the returned dictionary. @@ -99,7 +99,7 @@ def __new__(cls, *_args, **_kwargs): return instance def __init__( - self, input_data: Union[Dict[ComponentType, np.ndarray], Dict[str, np.ndarray]], system_frequency: float = 50.0 + self, input_data: dict[ComponentType, np.ndarray] | dict[str, np.ndarray], system_frequency: float = 50.0 ): """ Initialize the model from an input data set. @@ -122,7 +122,7 @@ def __init__( assert_no_error() self._all_component_count = {k: v for k, v in prepared_input.get_info().total_elements().items() if v > 0} - def update(self, *, update_data: Union[Dict[ComponentType, np.ndarray], Dict[str, np.ndarray]]): + def update(self, *, update_data: dict[ComponentType, np.ndarray] | dict[str, np.ndarray]): """ Update the model with changes. @@ -140,7 +140,7 @@ def update(self, *, update_data: Union[Dict[ComponentType, np.ndarray], Dict[str pgc.update_model(self._model, prepared_update.get_dataset_ptr()) assert_no_error() - def get_indexer(self, component_type: Union[ComponentType, str], ids: np.ndarray): + def get_indexer(self, component_type: ComponentType | str, ids: np.ndarray): """ Get array of indexers given array of ids for component type @@ -195,7 +195,7 @@ def _construct_output( symmetric: bool, is_batch: bool, batch_size: int, - ) -> Dict[ComponentType, np.ndarray]: + ) -> dict[ComponentType, np.ndarray]: all_component_count = self._get_output_component_count(calculation_type=calculation_type) return create_output_data( output_component_types=output_component_types, @@ -238,7 +238,7 @@ def _calculate_impl( options: Options, continue_on_batch_error: bool, decode_error: bool, - experimental_features: Union[_ExperimentalFeatures, str], # pylint: disable=too-many-arguments + experimental_features: _ExperimentalFeatures | str, # pylint: disable=too-many-arguments ): """ Core calculation routine @@ -312,14 +312,14 @@ def _calculate_power_flow( symmetric: bool = True, error_tolerance: float = 1e-8, max_iterations: int = 20, - calculation_method: Union[CalculationMethod, str] = CalculationMethod.newton_raphson, + calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson, update_data: Optional[Dataset] = None, threading: int = -1, - output_component_types: Optional[Union[Set[ComponentType], List[ComponentType]]] = None, + output_component_types: Optional[Set[ComponentType] | list[ComponentType]] = None, continue_on_batch_error: bool = False, decode_error: bool = True, - tap_changing_strategy: Union[TapChangingStrategy, str] = TapChangingStrategy.disabled, - experimental_features: Union[_ExperimentalFeatures, str] = _ExperimentalFeatures.disabled, + tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled, + experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled, ): calculation_type = CalculationType.power_flow options = self._options( @@ -349,14 +349,14 @@ def _calculate_state_estimation( symmetric: bool = True, error_tolerance: float = 1e-8, max_iterations: int = 20, - calculation_method: Union[CalculationMethod, str] = CalculationMethod.iterative_linear, + calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear, update_data: Optional[Dataset] = None, threading: int = -1, - output_component_types: Optional[Union[Set[ComponentType], List[ComponentType]]] = None, + 
output_component_types: Optional[Set[ComponentType] | list[ComponentType]] = None, continue_on_batch_error: bool = False, decode_error: bool = True, - experimental_features: Union[_ExperimentalFeatures, str] = _ExperimentalFeatures.disabled, - ) -> Dict[ComponentType, np.ndarray]: + experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled, + ) -> dict[ComponentType, np.ndarray]: calculation_type = CalculationType.state_estimation options = self._options( calculation_type=calculation_type, @@ -381,15 +381,15 @@ def _calculate_state_estimation( def _calculate_short_circuit( self, *, - calculation_method: Union[CalculationMethod, str] = CalculationMethod.iec60909, + calculation_method: CalculationMethod | str = CalculationMethod.iec60909, update_data: Optional[Dataset] = None, threading: int = -1, - output_component_types: Optional[Union[Set[ComponentType], List[ComponentType]]] = None, + output_component_types: Optional[Set[ComponentType] | list[ComponentType]] = None, continue_on_batch_error: bool = False, decode_error: bool = True, - short_circuit_voltage_scaling: Union[ShortCircuitVoltageScaling, str] = ShortCircuitVoltageScaling.maximum, - experimental_features: Union[_ExperimentalFeatures, str] = _ExperimentalFeatures.disabled, - ) -> Dict[ComponentType, np.ndarray]: + short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum, + experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled, + ) -> dict[ComponentType, np.ndarray]: calculation_type = CalculationType.short_circuit symmetric = False @@ -418,19 +418,14 @@ def calculate_power_flow( symmetric: bool = True, error_tolerance: float = 1e-8, max_iterations: int = 20, - calculation_method: Union[CalculationMethod, str] = CalculationMethod.newton_raphson, - update_data: Optional[ - Union[ - Dict[str, Union[np.ndarray, Dict[str, np.ndarray]]], - Dataset, - ] - ] = None, + calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson, + update_data: Optional[dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset] = None, threading: int = -1, - output_component_types: Optional[Union[Set[ComponentType], List[ComponentType]]] = None, + output_component_types: Optional[Set[ComponentType] | list[ComponentType]] = None, continue_on_batch_error: bool = False, decode_error: bool = True, - tap_changing_strategy: Union[TapChangingStrategy, str] = TapChangingStrategy.disabled, - ) -> Dict[ComponentType, np.ndarray]: + tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled, + ) -> dict[ComponentType, np.ndarray]: """ Calculate power flow once with the current model attributes. Or calculate in batch with the given update dataset in batch. 
@@ -512,18 +507,13 @@ def calculate_state_estimation( symmetric: bool = True, error_tolerance: float = 1e-8, max_iterations: int = 20, - calculation_method: Union[CalculationMethod, str] = CalculationMethod.iterative_linear, - update_data: Optional[ - Union[ - Dict[str, Union[np.ndarray, Dict[str, np.ndarray]]], - Dataset, - ] - ] = None, + calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear, + update_data: Optional[dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset] = None, threading: int = -1, - output_component_types: Optional[Union[Set[ComponentType], List[ComponentType]]] = None, + output_component_types: Optional[Set[ComponentType] | list[ComponentType]] = None, continue_on_batch_error: bool = False, decode_error: bool = True, - ) -> Dict[ComponentType, np.ndarray]: + ) -> dict[ComponentType, np.ndarray]: """ Calculate state estimation once with the current model attributes. Or calculate in batch with the given update dataset in batch. @@ -598,19 +588,14 @@ def calculate_state_estimation( def calculate_short_circuit( self, *, - calculation_method: Union[CalculationMethod, str] = CalculationMethod.iec60909, - update_data: Optional[ - Union[ - Dict[str, Union[np.ndarray, Dict[str, np.ndarray]]], - Dataset, - ] - ] = None, + calculation_method: CalculationMethod | str = CalculationMethod.iec60909, + update_data: Optional[dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset] = None, threading: int = -1, - output_component_types: Optional[Union[Set[ComponentType], List[ComponentType]]] = None, + output_component_types: Optional[Set[ComponentType] | list[ComponentType]] = None, continue_on_batch_error: bool = False, decode_error: bool = True, - short_circuit_voltage_scaling: Union[ShortCircuitVoltageScaling, str] = ShortCircuitVoltageScaling.maximum, - ) -> Dict[ComponentType, np.ndarray]: + short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum, + ) -> dict[ComponentType, np.ndarray]: """ Calculate a short circuit once with the current model attributes. 
Or calculate in batch with the given update dataset in batch diff --git a/src/power_grid_model/core/serialization.py b/src/power_grid_model/core/serialization.py index 553834966..001b2f5a4 100644 --- a/src/power_grid_model/core/serialization.py +++ b/src/power_grid_model/core/serialization.py @@ -9,7 +9,7 @@ from abc import ABC, abstractmethod from ctypes import byref from enum import IntEnum -from typing import Mapping, Optional, Union +from typing import Mapping, Optional import numpy as np @@ -49,7 +49,7 @@ class Deserializer: _dataset_ptr: WritableDatasetPtr _dataset: CWritableDataset - def __new__(cls, data: Union[str, bytes], serialization_type: SerializationType): + def __new__(cls, data: str | bytes, serialization_type: SerializationType): instance = super().__new__(cls) raw_data = data if isinstance(data, bytes) else data.encode() @@ -90,19 +90,13 @@ class Serializer(ABC): Serializer for the Power grid model """ - _data: Union[ - Mapping[ComponentType, np.ndarray], - Mapping[ComponentType, Union[np.ndarray, Mapping[str, np.ndarray]]], - ] + _data: Mapping[ComponentType, np.ndarray] | Mapping[ComponentType, np.ndarray | Mapping[str, np.ndarray]] _dataset: CConstDataset _serializer: SerializerPtr def __new__( cls, - data: Union[ - Mapping[ComponentType, np.ndarray], - Mapping[ComponentType, Union[np.ndarray, Mapping[str, np.ndarray]]], - ], + data: Mapping[ComponentType, np.ndarray] | Mapping[ComponentType, np.ndarray | Mapping[str, np.ndarray]], serialization_type: SerializationType, dataset_type: Optional[DatasetType] = None, ): @@ -190,7 +184,7 @@ class JsonDeserializer(Deserializer): # pylint: disable=too-few-public-methods JSON deserializer for the Power grid model """ - def __new__(cls, data: Union[str, bytes]): + def __new__(cls, data: str | bytes): return super().__new__(cls, data, SerializationType.JSON) @@ -210,10 +204,7 @@ class JsonSerializer(_StringSerializer): # pylint: disable=too-few-public-metho def __new__( cls, - data: Union[ - Mapping[ComponentType, np.ndarray], - Mapping[ComponentType, Union[np.ndarray, Mapping[str, np.ndarray]]], - ], + data: Mapping[ComponentType, np.ndarray] | Mapping[ComponentType, np.ndarray | Mapping[str, np.ndarray]], dataset_type: Optional[DatasetType] = None, ): return super().__new__(cls, data, SerializationType.JSON, dataset_type=dataset_type) @@ -226,16 +217,13 @@ class MsgpackSerializer(_BytesSerializer): # pylint: disable=too-few-public-met def __new__( cls, - data: Union[ - Mapping[ComponentType, np.ndarray], - Mapping[ComponentType, Union[np.ndarray, Mapping[str, np.ndarray]]], - ], + data: Mapping[ComponentType, np.ndarray] | Mapping[ComponentType, np.ndarray | Mapping[str, np.ndarray]], dataset_type: Optional[DatasetType] = None, ): return super().__new__(cls, data, SerializationType.MSGPACK, dataset_type=dataset_type) -def json_deserialize(data: Union[str, bytes]) -> Dataset: +def json_deserialize(data: str | bytes) -> Dataset: """ Load serialized JSON data to a new dataset. 
@@ -255,10 +243,7 @@ def json_deserialize(data: Union[str, bytes]) -> Dataset: def json_serialize( - data: Union[ - Mapping[ComponentType, np.ndarray], - Mapping[ComponentType, Union[np.ndarray, Mapping[str, np.ndarray]]], - ], + data: Mapping[ComponentType, np.ndarray] | Mapping[ComponentType, np.ndarray | Mapping[str, np.ndarray]], dataset_type: Optional[DatasetType] = None, use_compact_list: bool = False, indent: int = 2, @@ -310,10 +295,7 @@ def msgpack_deserialize(data: bytes) -> Dataset: def msgpack_serialize( - data: Union[ - Mapping[ComponentType, np.ndarray], - Mapping[ComponentType, Union[np.ndarray, Mapping[str, np.ndarray]]], - ], + data: Mapping[ComponentType, np.ndarray] | Mapping[ComponentType, np.ndarray | Mapping[str, np.ndarray]], dataset_type: Optional[DatasetType] = None, use_compact_list: bool = False, ) -> bytes: diff --git a/src/power_grid_model/data_types.py b/src/power_grid_model/data_types.py index 98d351f1c..7bbf8e232 100644 --- a/src/power_grid_model/data_types.py +++ b/src/power_grid_model/data_types.py @@ -7,15 +7,16 @@ have been defined and explained in this file. """ -from typing import Dict, List, Tuple, Union +from typing import TypeVar, Union import numpy as np from power_grid_model.core.dataset_definitions import ComponentType -# When we're dropping python 3.8, we should introduce proper NumPy type hinting +ComponentTypeVar = TypeVar("ComponentTypeVar", ComponentType, str) SingleArray = Union[np.ndarray] + """ A single array is a one-dimensional structured containing a list of components of the same type. @@ -26,12 +27,13 @@ """ DenseBatchArray = Union[np.ndarray] + """ A dense batch array is a two-dimensional structured numpy array containing a list of components of the same type for each scenario. """ -SparseBatchArray = Dict[str, Union[np.ndarray, SingleArray]] +SparseBatchArray = dict[str, np.ndarray | SingleArray] """ A sparse batch array is a dictionary containing the keys `indptr` and `data`. @@ -52,17 +54,17 @@ - scenario 2 sets the statuses of component with id 0 to 0 (and keeps defaults for other components) """ -BatchArray = Union[DenseBatchArray, SparseBatchArray] +BatchArray = DenseBatchArray | SparseBatchArray """ A batch array is a either a :class:`DenseBatchArray` or a :class:`SparseBatchArray`. """ -DataArray = Union[SingleArray, BatchArray] +DataArray = SingleArray | BatchArray """ A data array can be a :class:`SingleArray` or a :class:`BatchArray`. """ -SingleDataset = Dict[ComponentType, SingleArray] +SingleDataset = dict[ComponentTypeVar, SingleArray] """ A single dataset is a dictionary where the keys are the component types and the values are :class:`SingleArray` @@ -70,14 +72,14 @@ - Example: {"node": :class:`SingleArray`, "line": :class:`SingleArray`} """ -BatchDataset = Dict[ComponentType, BatchArray] +BatchDataset = dict[ComponentTypeVar, BatchArray] """ A batch dataset is a dictionary where the keys are the component types and the values are :class:`BatchArray` - Example: {"node": :class:`DenseBatchArray`, "line": :class:`SparseBatchArray`} """ -Dataset = Union[SingleDataset, BatchDataset] +Dataset = SingleDataset | BatchDataset """ A general data set can be a :class:`SingleDataset` or a :class:`BatchDataset`. @@ -87,7 +89,7 @@ - batch: {"node": :class:`DenseBatchArray`, "line": :class:`SparseBatchArray`} """ -BatchList = List[SingleDataset] +BatchList = list[SingleDataset] """ A batch list is an alternative representation of a batch. It is a list of single datasets, where each single dataset is actually a batch. 
The batch list is intended as an intermediate data type, during conversions. @@ -109,14 +111,14 @@ - Example: 10500.0 """ -AsymValue = Tuple[RealValue, RealValue, RealValue] +AsymValue = tuple[RealValue, RealValue, RealValue] """ Asymmetrical values are three-phase values like p or u_measured. - Example: (10400.0, 10500.0, 10600.0) """ -AttributeValue = Union[RealValue, NominalValue, AsymValue] +AttributeValue = RealValue | NominalValue | AsymValue """ When representing a grid as a native python structure, each attribute (u_rated etc) is either a nominal value, a real value, or a tuple of three real values. @@ -128,7 +130,7 @@ - asym: (10400.0, 10500.0, 10600.0) """ -Component = Dict[str, Union[AttributeValue, str]] +Component = dict[str, AttributeValue | str] """ A component, when represented in native python format, is a dictionary, where the keys are the attributes and the values are the corresponding values. It is allowed to add extra fields, containing either an AttributeValue or a string. @@ -136,7 +138,7 @@ - Example: {"id": 1, "u_rated": 10500.0, "original_id": "Busbar #1"} """ -ComponentList = List[Component] +ComponentList = list[Component] """ A component list is a list containing components. In essence it stores the same information as a np.ndarray, but in a native python format, without using numpy. @@ -144,7 +146,7 @@ - Example: [{"id": 1, "u_rated": 10500.0}, {"id": 2, "u_rated": 10500.0}] """ -SinglePythonDataset = Dict[ComponentType, ComponentList] +SinglePythonDataset = dict[ComponentTypeVar, ComponentList] """ A single dataset in native python representation is a dictionary, where the keys are the component names and the values are a list of all the instances of such a component. In essence it stores the same information as a @@ -158,7 +160,7 @@ } """ -BatchPythonDataset = List[SinglePythonDataset] +BatchPythonDataset = list[SinglePythonDataset] """ A batch dataset in native python representation is a list of dictionaries, where the keys are the component names and the values are a list of all the instances of such a component. In essence it stores the same information as a @@ -170,7 +172,7 @@ {"line": [{"id": 3, "from_status": 1, "to_status": 1, ...}],}] """ -PythonDataset = Union[SinglePythonDataset, BatchPythonDataset] +PythonDataset = SinglePythonDataset | BatchPythonDataset """ A general python data set can be a single or a batch python dataset. diff --git a/src/power_grid_model/errors.py b/src/power_grid_model/errors.py index 5223284b1..d8849bcbf 100644 --- a/src/power_grid_model/errors.py +++ b/src/power_grid_model/errors.py @@ -6,7 +6,6 @@ Error classes """ -from typing import List import numpy as np @@ -20,8 +19,8 @@ class PowerGridBatchError(PowerGridError): failed_scenarios: np.ndarray succeeded_scenarios: np.ndarray - error_messages: List[str] - errors: List[PowerGridError] + error_messages: list[str] + errors: list[PowerGridError] class InvalidArguments(PowerGridError): diff --git a/src/power_grid_model/validation/assertions.py b/src/power_grid_model/validation/assertions.py index 924fcdcff..862e45cfe 100644 --- a/src/power_grid_model/validation/assertions.py +++ b/src/power_grid_model/validation/assertions.py @@ -6,7 +6,7 @@ Helper functions to assert valid data. They basically call validate_input_data or validate_batch_data and raise a ValidationException if the validation results in one or more errors. 
""" -from typing import Dict, List, Optional, Union +from typing import Optional from power_grid_model.data_types import BatchDataset, SingleDataset from power_grid_model.enum import CalculationType @@ -21,7 +21,7 @@ class ValidationException(ValueError): to display a summary of all the errors when printing the exception. """ - def __init__(self, errors: Union[List[ValidationError], Dict[int, List[ValidationError]]], name: str = "data"): + def __init__(self, errors: list[ValidationError] | dict[int, list[ValidationError]], name: str = "data"): super().__init__(f"Invalid {name}") self.errors = errors self.name = name diff --git a/src/power_grid_model/validation/errors.py b/src/power_grid_model/validation/errors.py index cfc2415c6..10586e3c8 100644 --- a/src/power_grid_model/validation/errors.py +++ b/src/power_grid_model/validation/errors.py @@ -8,7 +8,7 @@ import re from abc import ABC from enum import Enum -from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type, Union +from typing import Any, Iterable, Optional, Set, Type from power_grid_model import ComponentType @@ -32,18 +32,18 @@ class ValidationError(ABC): """ - component: Optional[Union[ComponentType, List[ComponentType]]] = None + component: Optional[ComponentType | list[ComponentType]] = None """ The component, or components, to which the error applies. """ - field: Optional[Union[str, List[str], List[Tuple[ComponentType, str]]]] = None + field: Optional[str | list[str] | list[tuple[ComponentType, str]]] = None """ The field, or fields, to which the error applies. A field can also be a tuple (component, field) when multiple components are being addressed. """ - ids: Optional[Union[List[int], List[Tuple[ComponentType, int]]]] = None + ids: Optional[list[int] | list[tuple[ComponentType, int]]] = None """ The object identifiers to which the error applies. A field object identifier can also be a tuple (component, id) when multiple components are being addressed. @@ -67,7 +67,7 @@ def field_str(self) -> str: """ return f"'{self.field}'" - def get_context(self, id_lookup: Optional[Union[List[str], Dict[int, str]]] = None) -> Dict[str, Any]: + def get_context(self, id_lookup: Optional[list[str] | dict[int, str]] = None) -> dict[str, Any]: """ Returns a dictionary that supplies (human readable) information about this error. Each member variable is included in the dictionary. If a function {field_name}_str() exists, the value is overwritten by that function. @@ -117,7 +117,7 @@ class SingleFieldValidationError(ValidationError): _message = "Field {field} is not valid for {n} {objects}." component: ComponentType field: str - ids: List[int] + ids: list[int] def __init__(self, component: ComponentType, field: str, ids: Iterable[int]): """ @@ -138,10 +138,10 @@ class MultiFieldValidationError(ValidationError): _message = "Combination of fields {field} is not valid for {n} {objects}." component: ComponentType - field: List[str] - ids: List[int] + field: list[str] + ids: list[int] - def __init__(self, component: ComponentType, fields: List[str], ids: List[int]): + def __init__(self, component: ComponentType, fields: list[str], ids: list[int]): """ Args: component: Component name @@ -167,12 +167,12 @@ class MultiComponentValidationError(ValidationError): E.g. the two fields `id` fields of the `node` and `line` component: [('node', 'id'), ('line', 'id')]. 
""" - component: List[ComponentType] - field: List[Tuple[ComponentType, str]] - ids: List[Tuple[ComponentType, int]] + component: list[ComponentType] + field: list[tuple[ComponentType, str]] + ids: list[tuple[ComponentType, int]] _message = "Fields {field} are not valid for {n} {objects}." - def __init__(self, fields: List[Tuple[ComponentType, str]], ids: List[Tuple[ComponentType, int]]): + def __init__(self, fields: list[tuple[ComponentType, str]], ids: list[tuple[ComponentType, int]]): """ Args: fields: List of field names, formatted as tuples (component, field) @@ -204,11 +204,11 @@ class NotIdenticalError(SingleFieldValidationError): """ _message = "Field {field} is not unique for {n} {objects}: {num_unique} different values." - values: List[Any] + values: list[Any] unique: Set[Any] num_unique: int - def __init__(self, component: ComponentType, field: str, ids: Iterable[int], values: List[Any]): + def __init__(self, component: ComponentType, field: str, ids: Iterable[int], values: list[Any]): super().__init__(component, field, ids) self.values = values self.unique = set(self.values) @@ -240,9 +240,9 @@ class InvalidEnumValueError(SingleFieldValidationError): """ _message = "Field {field} contains invalid {enum} values for {n} {objects}." - enum: Union[Type[Enum], List[Type[Enum]]] + enum: Type[Enum] | list[Type[Enum]] - def __init__(self, component: ComponentType, field: str, ids: List[int], enum: Union[Type[Enum], List[Type[Enum]]]): + def __init__(self, component: ComponentType, field: str, ids: list[int], enum: Type[Enum] | list[Type[Enum]]): super().__init__(component, field, ids) self.enum = enum @@ -295,7 +295,7 @@ class IdNotInDatasetError(SingleFieldValidationError): _message = "ID does not exist in {ref_dataset} for {n} {objects}." ref_dataset: str - def __init__(self, component: ComponentType, ids: List[int], ref_dataset: str): + def __init__(self, component: ComponentType, ids: list[int], ref_dataset: str): super().__init__(component=component, field="id", ids=ids) self.ref_dataset = ref_dataset @@ -320,15 +320,15 @@ class InvalidIdError(SingleFieldValidationError): """ _message = "Field {field} does not contain a valid {ref_components} id for {n} {objects}. {filters}" - ref_components: List[ComponentType] + ref_components: list[ComponentType] def __init__( self, component: ComponentType, field: str, - ids: List[int], - ref_components: Union[ComponentType, List[ComponentType]], - filters: Optional[Dict[str, Any]] = None, + ids: list[int], + ref_components: ComponentType | list[ComponentType], + filters: Optional[dict[str, Any]] = None, ): # pylint: disable=too-many-arguments super().__init__(component=component, field=field, ids=ids) @@ -373,9 +373,9 @@ class ComparisonError(SingleFieldValidationError): _message = "Invalid {field}, compared to {ref_value} for {n} {objects}." - RefType = Union[int, float, str, Tuple[Union[int, float, str], ...]] + RefType = int | float | str | tuple[int | float | str, ...] - def __init__(self, component: ComponentType, field: str, ids: List[int], ref_value: "ComparisonError.RefType"): + def __init__(self, component: ComponentType, field: str, ids: list[int], ref_value: "ComparisonError.RefType"): super().__init__(component, field, ids) self.ref_value = ref_value @@ -479,14 +479,14 @@ class InvalidAssociatedEnumValueError(MultiFieldValidationError): """ _message = "The combination of fields {field} results in invalid {enum} values for {n} {objects}." 
- enum: Union[Type[Enum], List[Type[Enum]]] + enum: Type[Enum] | list[Type[Enum]] def __init__( self, component: ComponentType, - fields: List[str], - ids: List[int], - enum: Union[Type[Enum], List[Type[Enum]]], + fields: list[str], + ids: list[int], + enum: Type[Enum] | list[Type[Enum]], ): """ Args: diff --git a/src/power_grid_model/validation/rules.py b/src/power_grid_model/validation/rules.py index 0b197fbc2..745773912 100644 --- a/src/power_grid_model/validation/rules.py +++ b/src/power_grid_model/validation/rules.py @@ -30,12 +30,12 @@ The name of the column, which should be an field in the component data (numpy structured array) Output data: - errors: List[ValidationError] + errors: list[ValidationError] A list containing errors; in case of success, `errors` is the empty list: []. """ from enum import Enum -from typing import Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union +from typing import Any, Callable, Optional, Type, TypeVar import numpy as np @@ -81,7 +81,7 @@ CompError = TypeVar("CompError", bound=ComparisonError) -def all_greater_than_zero(data: SingleDataset, component: ComponentType, field: str) -> List[NotGreaterThanError]: +def all_greater_than_zero(data: SingleDataset, component: ComponentType, field: str) -> list[NotGreaterThanError]: """ Check that for all records of a particular type of component, the values in the 'field' column are greater than zero. Returns an empty list on success, or a list containing a single error object on failure. @@ -102,8 +102,8 @@ def all_greater_than_or_equal_to_zero( data: SingleDataset, component: ComponentType, field: str, - default_value: Optional[Union[np.ndarray, int, float]] = None, -) -> List[NotGreaterOrEqualError]: + default_value: Optional[np.ndarray | int | float] = None, +) -> list[NotGreaterOrEqualError]: """ Check that for all records of a particular type of component, the values in the 'field' column are greater than, or equal to zero. Returns an empty list on success, or a list containing a single error object on failure. @@ -112,7 +112,7 @@ def all_greater_than_or_equal_to_zero( data (SingleDataset): The input/update data set for all components component (ComponentType) The component of interest field (str): The field of interest - default_value (Optional[Union[np.ndarray, int, float]], optional): Some values are not required, but will + default_value (Optional[np.ndarray | int | float], optional): Some values are not required, but will receive a default value in the C++ core. To do a proper input validation, these default values should be included in the validation. It can be a fixed value for the entire column (int/float) or be different for each element (np.ndarray). @@ -125,8 +125,8 @@ def all_greater_than_or_equal_to_zero( def all_greater_than( - data: SingleDataset, component: ComponentType, field: str, ref_value: Union[int, float, str] -) -> List[NotGreaterThanError]: + data: SingleDataset, component: ComponentType, field: str, ref_value: int | float | str +) -> list[NotGreaterThanError]: """ Check that for all records of a particular type of component, the values in the 'field' column are greater than the reference value. Returns an empty list on success, or a list containing a single error object on failure. 
@@ -154,9 +154,9 @@ def all_greater_or_equal( data: SingleDataset, component: ComponentType, field: str, - ref_value: Union[int, float, str], - default_value: Optional[Union[np.ndarray, int, float]] = None, -) -> List[NotGreaterOrEqualError]: + ref_value: int | float | str, + default_value: Optional[np.ndarray | int | float] = None, +) -> list[NotGreaterOrEqualError]: """ Check that for all records of a particular type of component, the values in the 'field' column are greater than, or equal to the reference value. Returns an empty list on success, or a list containing a single error object on @@ -188,8 +188,8 @@ def not_greater_or_equal(val: np.ndarray, *ref: np.ndarray): def all_less_than( - data: SingleDataset, component: ComponentType, field: str, ref_value: Union[int, float, str] -) -> List[NotLessThanError]: + data: SingleDataset, component: ComponentType, field: str, ref_value: int | float | str +) -> list[NotLessThanError]: """ Check that for all records of a particular type of component, the values in the 'field' column are less than the reference value. Returns an empty list on success, or a list containing a single error object on failure. @@ -214,8 +214,8 @@ def not_less(val: np.ndarray, *ref: np.ndarray): def all_less_or_equal( - data: SingleDataset, component: ComponentType, field: str, ref_value: Union[int, float, str] -) -> List[NotLessOrEqualError]: + data: SingleDataset, component: ComponentType, field: str, ref_value: int | float | str +) -> list[NotLessOrEqualError]: """ Check that for all records of a particular type of component, the values in the 'field' column are less than, or equal to the reference value. Returns an empty list on success, or a list containing a single error object on @@ -245,10 +245,10 @@ def all_between( # pylint: disable=too-many-arguments data: SingleDataset, component: ComponentType, field: str, - ref_value_1: Union[int, float, str], - ref_value_2: Union[int, float, str], - default_value: Optional[Union[np.ndarray, int, float]] = None, -) -> List[NotBetweenError]: + ref_value_1: int | float | str, + ref_value_2: int | float | str, + default_value: Optional[np.ndarray | int | float] = None, +) -> list[NotBetweenError]: """ Check that for all records of a particular type of component, the values in the 'field' column are (exclusively) between reference value 1 and 2. Value 1 may be smaller, but also larger than value 2. Returns an empty list on @@ -285,11 +285,11 @@ def all_between_or_at( # pylint: disable=too-many-arguments data: SingleDataset, component: ComponentType, field: str, - ref_value_1: Union[int, float, str], - ref_value_2: Union[int, float, str], - default_value_1: Optional[Union[np.ndarray, int, float]] = None, - default_value_2: Optional[Union[np.ndarray, int, float]] = None, -) -> List[NotBetweenOrAtError]: + ref_value_1: int | float | str, + ref_value_2: int | float | str, + default_value_1: Optional[np.ndarray | int | float] = None, + default_value_2: Optional[np.ndarray | int | float] = None, +) -> list[NotBetweenOrAtError]: """ Check that for all records of a particular type of component, the values in the 'field' column are inclusively between reference value 1 and 2. Value 1 may be smaller, but also larger than value 2. 
Returns an empty list on @@ -338,9 +338,9 @@ def none_match_comparison( compare_fn: Callable, ref_value: ComparisonError.RefType, error: Type[CompError] = ComparisonError, # type: ignore - default_value_1: Optional[Union[np.ndarray, int, float]] = None, - default_value_2: Optional[Union[np.ndarray, int, float]] = None, -) -> List[CompError]: + default_value_1: Optional[np.ndarray | int | float] = None, + default_value_2: Optional[np.ndarray | int | float] = None, +) -> list[CompError]: # pylint: disable=too-many-arguments """ For all records of a particular type of component, check if the value in the 'field' column match the comparison. @@ -384,7 +384,7 @@ def none_match_comparison( return [] -def all_identical(data: SingleDataset, component: ComponentType, field: str) -> List[NotIdenticalError]: +def all_identical(data: SingleDataset, component: ComponentType, field: str) -> list[NotIdenticalError]: """ Check that for all records of a particular type of component, the values in the 'field' column are identical. @@ -409,7 +409,7 @@ def all_identical(data: SingleDataset, component: ComponentType, field: str) -> def all_enabled_identical( data: SingleDataset, component: ComponentType, field: str, status_field: str -) -> List[NotIdenticalError]: +) -> list[NotIdenticalError]: """ Check that for all records of a particular type of component, the values in the 'field' column are identical. Only entries are checked where the 'status' field is not 0. @@ -436,7 +436,7 @@ def all_enabled_identical( ) -def all_unique(data: SingleDataset, component: ComponentType, field: str) -> List[NotUniqueError]: +def all_unique(data: SingleDataset, component: ComponentType, field: str) -> list[NotUniqueError]: """ Check that for all records of a particular type of component, the values in the 'field' column are unique within the 'field' column of that component. @@ -460,15 +460,15 @@ def all_unique(data: SingleDataset, component: ComponentType, field: str) -> Lis def all_cross_unique( - data: SingleDataset, fields: List[Tuple[ComponentType, str]], cross_only=True -) -> List[MultiComponentNotUniqueError]: + data: SingleDataset, fields: list[tuple[ComponentType, str]], cross_only=True +) -> list[MultiComponentNotUniqueError]: """ Check that for all records of a particular type of component, the values in the 'field' column are unique within the 'field' column of that component. Args: data (SingleDataset): The input/update data set for all components - fields (List[Tuple[str, str]]): The fields of interest, formatted as + fields (list[tuple[str, str]]): The fields of interest, formatted as [(component_1, field_1), (component_2, field_2)] cross_only (bool, optional): Do not include duplicates within a single field. It is advised that you use all_unique() to explicitly check uniqueness within a single field. @@ -477,7 +477,7 @@ def all_cross_unique( A list containing zero or one MultiComponentNotUniqueError, listing all fields and ids where the value was not unique between the fields. 
""" - all_values: Dict[int, List[Tuple[Tuple[ComponentType, str], int]]] = {} + all_values: dict[int, list[tuple[tuple[ComponentType, str], int]]] = {} duplicate_ids = set() for component, field in fields: for obj_id, value in zip(data[component]["id"], data[component][field]): @@ -496,8 +496,8 @@ def all_cross_unique( def all_valid_enum_values( - data: SingleDataset, component: ComponentType, field: str, enum: Union[Type[Enum], List[Type[Enum]]] -) -> List[InvalidEnumValueError]: + data: SingleDataset, component: ComponentType, field: str, enum: Type[Enum] | list[Type[Enum]] +) -> list[InvalidEnumValueError]: """ Check that for all records of a particular type of component, the values in the 'field' column are valid values for the supplied enum class. Returns an empty list on success, or a list containing a single error object on failure. @@ -506,13 +506,13 @@ def all_valid_enum_values( data (SingleDataset): The input/update data set for all components component (ComponentType): The component of interest field (str): The field of interest - enum (Type[Enum] | List[Type[Enum]]): The enum type to validate against, or a list of such enum types + enum (Type[Enum] | list[Type[Enum]]): The enum type to validate against, or a list of such enum types Returns: A list containing zero or one InvalidEnumValueError, listing all ids where the value in the field of interest was not a valid value in the supplied enum type. """ - enums: List[Type[Enum]] = enum if isinstance(enum, list) else [enum] + enums: list[Type[Enum]] = enum if isinstance(enum, list) else [enum] valid = {nan_type(component, field)} for enum_type in enums: @@ -530,25 +530,25 @@ def all_valid_associated_enum_values( # pylint: disable=too-many-arguments component: ComponentType, field: str, ref_object_id_field: str, - ref_components: List[ComponentType], - enum: Union[Type[Enum], List[Type[Enum]]], + ref_components: list[ComponentType], + enum: Type[Enum] | list[Type[Enum]], **filters: Any, -) -> List[InvalidAssociatedEnumValueError]: +) -> list[InvalidAssociatedEnumValueError]: """ Args: data (SingleDataset): The input/update data set for all components component (ComponentType): The component of interest field (str): The field of interest ref_object_id_field (str): The field that contains the referenced component ids - ref_components (List[ComponentType]): The component or components in which we want to look for ids - enum (Type[Enum] | List[Type[Enum]]): The enum type to validate against, or a list of such enum types + ref_components (list[ComponentType]): The component or components in which we want to look for ids + enum (Type[Enum] | list[Type[Enum]]): The enum type to validate against, or a list of such enum types **filters: One or more filters on the dataset. E.g. regulated_object="transformer". Returns: A list containing zero or one InvalidAssociatedEnumValueError, listing all ids where the value in the field of interest was not a valid value in the supplied enum type. 
""" - enums: List[Type[Enum]] = enum if isinstance(enum, list) else [enum] + enums: list[Type[Enum]] = enum if isinstance(enum, list) else [enum] valid_ids = get_valid_ids(data=data, ref_components=ref_components) mask = np.logical_and( @@ -571,9 +571,9 @@ def all_valid_ids( data: SingleDataset, component: ComponentType, field: str, - ref_components: Union[ComponentType, List[ComponentType]], + ref_components: ComponentType | list[ComponentType], **filters: Any, -) -> List[InvalidIdError]: +) -> list[InvalidIdError]: """ For a column which should contain object identifiers (ids), check if the id exists in the data, for a specific set of reference component types. E.g. is the from_node field of each line referring to an existing node id? @@ -600,7 +600,7 @@ def all_valid_ids( return [] -def all_boolean(data: SingleDataset, component: ComponentType, field: str) -> List[NotBooleanError]: +def all_boolean(data: SingleDataset, component: ComponentType, field: str) -> list[NotBooleanError]: """ Check that for all records of a particular type of component, the values in the 'field' column are valid boolean values, i.e. 0 or 1. Returns an empty list on success, or a list containing a single error object on failure. @@ -623,7 +623,7 @@ def all_boolean(data: SingleDataset, component: ComponentType, field: str) -> Li def all_not_two_values_zero( data: SingleDataset, component: ComponentType, field_1: str, field_2: str -) -> List[TwoValuesZeroError]: +) -> list[TwoValuesZeroError]: """ Check that for all records of a particular type of component, the values in the 'field_1' and 'field_2' column are not both zero. Returns an empty list on success, or a list containing a single error object on failure. @@ -648,7 +648,7 @@ def all_not_two_values_zero( def all_not_two_values_equal( data: SingleDataset, component: ComponentType, field_1: str, field_2: str -) -> List[SameValueError]: +) -> list[SameValueError]: """ Check that for all records of a particular type of component, the values in the 'field_1' and 'field_2' column are not both the same value. E.g. from_node and to_node of a line. Returns an empty list on success, or a list @@ -674,7 +674,7 @@ def all_not_two_values_equal( def all_ids_exist_in_data_set( data: SingleDataset, ref_data: SingleDataset, component: ComponentType, ref_name: str -) -> List[IdNotInDatasetError]: +) -> list[IdNotInDatasetError]: """ Check that for all records of a particular type of component, the ids exist in the reference data set. @@ -694,7 +694,7 @@ def all_ids_exist_in_data_set( return [] -def all_finite(data: SingleDataset, exceptions: Optional[Dict[ComponentType, List[str]]] = None) -> List[InfinityError]: +def all_finite(data: SingleDataset, exceptions: Optional[dict[ComponentType, list[str]]] = None) -> list[InfinityError]: """ Check that for all records in all component, the values in all columns are finite value, i.e. float values other than inf, or -inf. Nan values are ignored, as in all other comparison functions. You can use non_missing() to @@ -729,9 +729,9 @@ def all_finite(data: SingleDataset, exceptions: Optional[Dict[ComponentType, Lis def none_missing( data: SingleDataset, component: ComponentType, - fields: Union[List[Union[str, List[str]]], str, List[str]], + fields: list[str | list[str]] | str | list[str], index: int = 0, -) -> List[MissingValueError]: +) -> list[MissingValueError]: """ Check that for all records of a particular type of component, the values in the 'fields' columns are not NaN. 
Returns an empty list on success, or a list containing a single error object on failure. @@ -765,7 +765,7 @@ def none_missing( return errors -def valid_p_q_sigma(data: SingleDataset, component: ComponentType) -> List[MultiFieldValidationError]: +def valid_p_q_sigma(data: SingleDataset, component: ComponentType) -> list[MultiFieldValidationError]: """ Check validity of the pair `(p_sigma, q_sigma)` for 'sym_power_sensor' and 'asym_power_sensor'. @@ -799,7 +799,7 @@ def valid_p_q_sigma(data: SingleDataset, component: ComponentType) -> List[Multi def all_valid_clocks( data: SingleDataset, component: ComponentType, clock_field: str, winding_from_field: str, winding_to_field: str -) -> List[TransformerClockError]: +) -> list[TransformerClockError]: """ Custom validation rule: Odd clock number is only allowed for Dy(n) or Y(N)d configuration. @@ -837,7 +837,7 @@ def all_valid_clocks( def all_valid_fault_phases( data: SingleDataset, component: ComponentType, fault_type_field: str, fault_phase_field: str -) -> List[FaultPhaseError]: +) -> list[FaultPhaseError]: """ Custom validation rule: Only a subset of fault_phases is supported for each fault type. @@ -854,7 +854,7 @@ def all_valid_fault_phases( fault_types = data[component][fault_type_field] fault_phases = data[component][fault_phase_field] - supported_combinations: Dict[FaultType, List[FaultPhase]] = { + supported_combinations: dict[FaultType, list[FaultPhase]] = { FaultType.three_phase: [FaultPhase.abc, FaultPhase.default_value, FaultPhase.nan], FaultType.single_phase_to_ground: [ FaultPhase.a, @@ -894,16 +894,16 @@ def all_supported_tap_control_side( # pylint: disable=too-many-arguments component: ComponentType, control_side_field: str, regulated_object_field: str, - tap_side_fields: List[Tuple[ComponentType, str]], + tap_side_fields: list[tuple[ComponentType, str]], **filters: Any, -) -> List[UnsupportedTransformerRegulationError]: +) -> list[UnsupportedTransformerRegulationError]: """ Args: data (SingleDataset): The input/update data set for all components component (ComponentType): The component of interest control_side_field (str): The field of interest regulated_object_field (str): The field that contains the regulated component ids - tap_side_fields (List[Tuple[ComponentType, str]]): The fields of interest per regulated component, + tap_side_fields (list[tuple[ComponentType, str]]): The fields of interest per regulated component, formatted as [(component_1, field_1), (component_2, field_2)] **filters: One or more filters on the dataset. E.g. regulated_object="transformer". diff --git a/src/power_grid_model/validation/utils.py b/src/power_grid_model/validation/utils.py index ed2a0347e..ddcaaa305 100644 --- a/src/power_grid_model/validation/utils.py +++ b/src/power_grid_model/validation/utils.py @@ -6,7 +6,7 @@ Utilities used for validation. Only errors_to_string() is intended for end users. """ import re -from typing import Any, Dict, List, Optional, Union +from typing import Any, Optional import numpy as np @@ -16,7 +16,7 @@ from power_grid_model.validation.errors import ValidationError -def eval_expression(data: np.ndarray, expression: Union[int, float, str]) -> np.ndarray: +def eval_expression(data: np.ndarray, expression: int | float | str) -> np.ndarray: """ Wrapper function that checks the type of the 'expression'. If the expression is a string, it is assumed to be a field expression and the expression is validated. 
Otherwise it is assumed to be a numerical value and the value @@ -126,10 +126,10 @@ def update_component_data(component: ComponentType, input_data: np.ndarray, upda def errors_to_string( - errors: Union[List[ValidationError], Dict[int, List[ValidationError]], None], + errors: list[ValidationError] | dict[int, list[ValidationError]] | None, name: str = "the data", details: bool = False, - id_lookup: Optional[Union[List[str], Dict[int, str]]] = None, + id_lookup: Optional[list[str] | dict[int, str]] = None, ) -> str: """ Convert a set of errors (list or dict) to a human readable string representation. @@ -162,7 +162,7 @@ def errors_to_string( return msg -def nan_type(component: Union[str, ComponentType], field: str, data_type: DatasetType = DatasetType.input): +def nan_type(component: str | ComponentType, field: str, data_type: DatasetType = DatasetType.input): """ Helper function to retrieve the nan value for a certain field as defined in the power_grid_meta_data. """ @@ -211,7 +211,7 @@ def get_indexer(source: np.ndarray, target: np.ndarray, default_value: Optional[ def set_default_value( - data: SingleDataset, component: ComponentType, field: str, default_value: Union[int, float, np.ndarray] + data: SingleDataset, component: ComponentType, field: str, default_value: int | float | np.ndarray ): """ This function sets the default value in the data that is to be validated, so the default values are included in the @@ -238,7 +238,7 @@ def set_default_value( data[component][field][mask] = default_value -def get_valid_ids(data: SingleDataset, ref_components: Union[ComponentType, List[ComponentType]]) -> List[int]: +def get_valid_ids(data: SingleDataset, ref_components: ComponentType | list[ComponentType]) -> list[int]: """ This function returns the valid IDs specified by all ref_components @@ -247,7 +247,7 @@ def get_valid_ids(data: SingleDataset, ref_components: Union[ComponentType, List ref_components: The component or components in which we want to look for ids Returns: - List[int]: the list of valid IDs + list[int]: the list of valid IDs """ # For convenience, ref_component may be a string and we'll convert it to a 'list' containing that string as it's # single element. 
diff --git a/src/power_grid_model/validation/validation.py b/src/power_grid_model/validation/validation.py index 3883cfc55..7ac8b19a6 100644 --- a/src/power_grid_model/validation/validation.py +++ b/src/power_grid_model/validation/validation.py @@ -11,7 +11,7 @@ import copy from collections.abc import Sized as ABCSized from itertools import chain -from typing import Dict, List, Optional, Union, cast +from typing import Optional, cast import numpy as np @@ -63,7 +63,7 @@ def validate_input_data( input_data: SingleDataset, calculation_type: Optional[CalculationType] = None, symmetric: bool = True -) -> Optional[List[ValidationError]]: +) -> Optional[list[ValidationError]]: """ Validates the entire input dataset: @@ -87,7 +87,7 @@ def validate_input_data( input_data_copy = copy.deepcopy(input_data) assert_valid_data_structure(input_data_copy, DatasetType.input) - errors: List[ValidationError] = [] + errors: list[ValidationError] = [] errors += validate_required_values(input_data_copy, calculation_type, symmetric) errors += validate_unique_ids_across_components(input_data_copy) errors += validate_values(input_data_copy, calculation_type) @@ -99,7 +99,7 @@ def validate_batch_data( update_data: BatchDataset, calculation_type: Optional[CalculationType] = None, symmetric: bool = True, -) -> Optional[Dict[int, List[ValidationError]]]: +) -> Optional[dict[int, list[ValidationError]]]: """ The input dataset is validated: @@ -129,7 +129,7 @@ def validate_batch_data( """ assert_valid_data_structure(input_data, DatasetType.input) - input_errors: List[ValidationError] = list(validate_unique_ids_across_components(input_data)) + input_errors: list[ValidationError] = list(validate_unique_ids_across_components(input_data)) # Splitting update_data_into_batches may raise TypeErrors and ValueErrors batch_data = convert_batch_dataset_to_batch_list(update_data) @@ -137,7 +137,7 @@ def validate_batch_data( errors = {} for batch, batch_update_data in enumerate(batch_data): assert_valid_data_structure(batch_update_data, DatasetType.update) - id_errors: List[ValidationError] = list(validate_ids_exist(batch_update_data, input_data)) + id_errors: list[ValidationError] = list(validate_ids_exist(batch_update_data, input_data)) batch_errors = input_errors + id_errors if not id_errors: @@ -190,7 +190,7 @@ def assert_valid_data_structure(data: Dataset, data_type: DatasetType) -> None: ) -def validate_unique_ids_across_components(data: SingleDataset) -> List[MultiComponentNotUniqueError]: +def validate_unique_ids_across_components(data: SingleDataset) -> list[MultiComponentNotUniqueError]: """ Checks if all ids in the input dataset are unique @@ -205,8 +205,8 @@ def validate_unique_ids_across_components(data: SingleDataset) -> List[MultiComp def validate_ids_exist( - update_data: Dict[ComponentType, np.ndarray], input_data: SingleDataset -) -> List[IdNotInDatasetError]: + update_data: dict[ComponentType, np.ndarray], input_data: SingleDataset +) -> list[IdNotInDatasetError]: """ Checks if all ids of the components in the update data exist in the input data. This needs to be true, because you can only update existing components. 
@@ -229,7 +229,7 @@ def validate_ids_exist( def _process_power_sigma_and_p_q_sigma( data: SingleDataset, sensor: ComponentType, - required_list: Dict[Union[ComponentType, str], List[Union[str, List[str]]]], + required_list: dict[ComponentType | str, list[str | list[str]]], ) -> None: """ Helper function to process the required list when both `p_sigma` and `q_sigma` exist @@ -251,7 +251,7 @@ def _process_power_sigma_in_list(_sensor_mask, _power_sigma, _p_sigma, _q_sigma) for sublist, should_remove in zip(_sensor_mask, _mask): if should_remove and "power_sigma" in sublist: - sublist = cast(List[str], sublist) + sublist = cast(list[str], sublist) sublist.remove("power_sigma") if _check_sensor_in_data(data, sensor): @@ -267,7 +267,7 @@ def _process_power_sigma_in_list(_sensor_mask, _power_sigma, _p_sigma, _q_sigma) def validate_required_values( data: SingleDataset, calculation_type: Optional[CalculationType] = None, symmetric: bool = True -) -> List[MissingValueError]: +) -> list[MissingValueError]: """ Checks if all required data is available. @@ -280,7 +280,7 @@ def validate_required_values( An empty list if all required data is available, or a list of MissingValueErrors. """ # Base - required: Dict[Union[ComponentType, str], List[Union[str, List[str]]]] = {"base": ["id"]} + required: dict[ComponentType | str, list[str | list[str]]] = {"base": ["id"]} # Nodes required["node"] = required["base"] + ["u_rated"] @@ -432,7 +432,7 @@ def process_nested_items(component, items, data, results): return list(chain(*results)) -def validate_values(data: SingleDataset, calculation_type: Optional[CalculationType] = None) -> List[ValidationError]: +def validate_values(data: SingleDataset, calculation_type: Optional[CalculationType] = None) -> list[ValidationError]: """ For each component supplied in the data, call the appropriate validation function @@ -444,7 +444,7 @@ def validate_values(data: SingleDataset, calculation_type: Optional[CalculationT An empty list if all required data is valid, or a list of ValidationErrors. 
""" - errors: List[ValidationError] = list( + errors: list[ValidationError] = list( all_finite( data, { @@ -496,18 +496,18 @@ def validate_values(data: SingleDataset, calculation_type: Optional[CalculationT # pylint: disable=missing-function-docstring -def validate_base(data: SingleDataset, component: ComponentType) -> List[ValidationError]: - errors: List[ValidationError] = list(all_unique(data, component, "id")) +def validate_base(data: SingleDataset, component: ComponentType) -> list[ValidationError]: + errors: list[ValidationError] = list(all_unique(data, component, "id")) return errors -def validate_node(data: SingleDataset) -> List[ValidationError]: +def validate_node(data: SingleDataset) -> list[ValidationError]: errors = validate_base(data, ComponentType.node) errors += all_greater_than_zero(data, ComponentType.node, "u_rated") return errors -def validate_branch(data: SingleDataset, component: ComponentType) -> List[ValidationError]: +def validate_branch(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_base(data, component) errors += all_valid_ids(data, component, "from_node", ComponentType.node) errors += all_valid_ids(data, component, "to_node", ComponentType.node) @@ -517,7 +517,7 @@ def validate_branch(data: SingleDataset, component: ComponentType) -> List[Valid return errors -def validate_line(data: SingleDataset) -> List[ValidationError]: +def validate_line(data: SingleDataset) -> list[ValidationError]: errors = validate_branch(data, ComponentType.line) errors += all_not_two_values_zero(data, ComponentType.line, "r1", "x1") errors += all_not_two_values_zero(data, ComponentType.line, "r0", "x0") @@ -525,7 +525,7 @@ def validate_line(data: SingleDataset) -> List[ValidationError]: return errors -def validate_transformer(data: SingleDataset) -> List[ValidationError]: +def validate_transformer(data: SingleDataset) -> list[ValidationError]: errors = validate_branch(data, ComponentType.transformer) errors += all_greater_than_zero(data, ComponentType.transformer, "u1") errors += all_greater_than_zero(data, ComponentType.transformer, "u2") @@ -563,7 +563,7 @@ def validate_transformer(data: SingleDataset) -> List[ValidationError]: return errors -def validate_branch3(data: SingleDataset, component: ComponentType) -> List[ValidationError]: +def validate_branch3(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_base(data, component) errors += all_valid_ids(data, component, "node_1", ComponentType.node) errors += all_valid_ids(data, component, "node_2", ComponentType.node) @@ -578,7 +578,7 @@ def validate_branch3(data: SingleDataset, component: ComponentType) -> List[Vali # pylint: disable=R0915 -def validate_three_winding_transformer(data: SingleDataset) -> List[ValidationError]: +def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationError]: errors = validate_branch3(data, ComponentType.three_winding_transformer) errors += all_greater_than_zero(data, ComponentType.three_winding_transformer, "u1") errors += all_greater_than_zero(data, ComponentType.three_winding_transformer, "u2") @@ -791,14 +791,14 @@ def validate_three_winding_transformer(data: SingleDataset) -> List[ValidationEr return errors -def validate_appliance(data: SingleDataset, component: ComponentType) -> List[ValidationError]: +def validate_appliance(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_base(data, component) errors += all_boolean(data, component, "status") errors += 
all_valid_ids(data, component, "node", ComponentType.node) return errors -def validate_source(data: SingleDataset) -> List[ValidationError]: +def validate_source(data: SingleDataset) -> list[ValidationError]: errors = validate_appliance(data, ComponentType.source) errors += all_greater_than_zero(data, ComponentType.source, "u_ref") errors += all_greater_than_zero(data, ComponentType.source, "sk") @@ -807,18 +807,18 @@ def validate_source(data: SingleDataset) -> List[ValidationError]: return errors -def validate_generic_load_gen(data: SingleDataset, component: ComponentType) -> List[ValidationError]: +def validate_generic_load_gen(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_appliance(data, component) errors += all_valid_enum_values(data, component, "type", LoadGenType) return errors -def validate_shunt(data: SingleDataset) -> List[ValidationError]: +def validate_shunt(data: SingleDataset) -> list[ValidationError]: errors = validate_appliance(data, ComponentType.shunt) return errors -def validate_generic_voltage_sensor(data: SingleDataset, component: ComponentType) -> List[ValidationError]: +def validate_generic_voltage_sensor(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_base(data, component) errors += all_greater_than_zero(data, component, "u_sigma") errors += all_greater_than_zero(data, component, "u_measured") @@ -826,7 +826,7 @@ def validate_generic_voltage_sensor(data: SingleDataset, component: ComponentTyp return errors -def validate_generic_power_sensor(data: SingleDataset, component: ComponentType) -> List[ValidationError]: +def validate_generic_power_sensor(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_base(data, component) errors += all_greater_than_zero(data, component, "power_sigma") errors += all_valid_enum_values(data, component, "measured_terminal_type", MeasuredTerminalType) @@ -923,7 +923,7 @@ def validate_generic_power_sensor(data: SingleDataset, component: ComponentType) return errors -def validate_fault(data: SingleDataset) -> List[ValidationError]: +def validate_fault(data: SingleDataset) -> list[ValidationError]: errors = validate_base(data, ComponentType.fault) errors += all_boolean(data, ComponentType.fault, "status") errors += all_valid_enum_values(data, ComponentType.fault, "fault_type", FaultType) @@ -936,7 +936,7 @@ def validate_fault(data: SingleDataset) -> List[ValidationError]: return errors -def validate_regulator(data: SingleDataset, component: ComponentType) -> List[ValidationError]: +def validate_regulator(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_base(data, component) errors += all_valid_ids( data, @@ -947,7 +947,7 @@ def validate_regulator(data: SingleDataset, component: ComponentType) -> List[Va return errors -def validate_transformer_tap_regulator(data: SingleDataset) -> List[ValidationError]: +def validate_transformer_tap_regulator(data: SingleDataset) -> list[ValidationError]: errors = validate_regulator(data, ComponentType.transformer_tap_regulator) errors += all_boolean(data, ComponentType.transformer_tap_regulator, "status") errors += all_valid_enum_values( diff --git a/tests/unit/test_0Z_model_validation.py b/tests/unit/test_0Z_model_validation.py index a26b81ccd..db831e94f 100644 --- a/tests/unit/test_0Z_model_validation.py +++ b/tests/unit/test_0Z_model_validation.py @@ -4,7 +4,7 @@ from copy import copy from pathlib import Path -from typing import Callable, 
Dict, List, Tuple +from typing import Callable, Tuple import numpy as np import pytest @@ -14,7 +14,7 @@ from .utils import EXPORT_OUTPUT, PowerGridModelWithExt, compare_result, import_case_data, pytest_cases, save_json_data -calculation_function_arguments_map: Dict[str, Tuple[Callable, List[str]]] = { +calculation_function_arguments_map: dict[str, Tuple[Callable, list[str]]] = { "power_flow": ( PowerGridModelWithExt.calculate_power_flow_with_ext, [ @@ -59,11 +59,11 @@ } -def supported_kwargs(kwargs, supported: List[str]): +def supported_kwargs(kwargs, supported: list[str]): return {key: value for key, value in kwargs.items() if key in supported} -def get_kwargs(sym: bool, calculation_type: str, calculation_method: str, params: Dict, **extra_kwargs) -> Dict: +def get_kwargs(sym: bool, calculation_type: str, calculation_method: str, params: dict, **extra_kwargs) -> dict: base_kwargs = {"symmetric": sym, "calculation_method": calculation_method} for key, value in params.items(): if key not in base_kwargs: @@ -97,7 +97,7 @@ def test_single_validation( calculation_method: str, rtol: float, atol: float, - params: Dict, + params: dict, ): # Initialization case_data = import_case_data(case_path, calculation_type=calculation_type, sym=sym) @@ -148,7 +148,7 @@ def test_batch_validation( calculation_method: str, rtol: float, atol: float, - params: Dict, + params: dict, ): # Initialization case_data = import_case_data(case_path, calculation_type=calculation_type, sym=sym) diff --git a/tests/unit/test_power_grid_model.py b/tests/unit/test_power_grid_model.py index 6a9227edb..eb9de80b9 100644 --- a/tests/unit/test_power_grid_model.py +++ b/tests/unit/test_power_grid_model.py @@ -3,7 +3,6 @@ # SPDX-License-Identifier: MPL-2.0 from copy import copy -from typing import Dict import numpy as np import pytest diff --git a/tests/unit/test_serialization.py b/tests/unit/test_serialization.py index 15b66cc2b..3400b981a 100644 --- a/tests/unit/test_serialization.py +++ b/tests/unit/test_serialization.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: MPL-2.0 import json -from typing import Any, List, Mapping, Optional, Union +from typing import Any, Mapping, Optional import msgpack import numpy as np @@ -269,7 +269,7 @@ def assert_almost_equal(value: np.ndarray, reference: Any): def assert_scenario_correct( deserialized_dataset: Mapping[ComponentType, np.ndarray], serialized_dataset: Mapping[str, Any], - sparse_components: List[ComponentType], + sparse_components: list[ComponentType], ): for key in serialized_dataset["data"]: if key not in deserialized_dataset: @@ -294,7 +294,7 @@ def assert_scenario_correct( def assert_serialization_correct( - deserialized_dataset: Mapping[ComponentType, Union[np.ndarray, Mapping[str, np.ndarray]]], + deserialized_dataset: Mapping[ComponentType, np.ndarray | Mapping[str, np.ndarray]], serialized_dataset: Mapping[str, Any], ): """Assert the dataset correctly reprensents the input data.""" diff --git a/tests/unit/utils.py b/tests/unit/utils.py index 0eb7d0c29..18ec9cb22 100644 --- a/tests/unit/utils.py +++ b/tests/unit/utils.py @@ -6,7 +6,7 @@ import os import re from pathlib import Path -from typing import Any, Dict, List, Optional, Union +from typing import Any, Optional import numpy as np import pytest @@ -93,7 +93,7 @@ def get_output_type(calculation_type: str, sym: bool) -> str: return "asym_output" -def get_test_case_paths(calculation_type: str, test_cases: Optional[List[str]] = None) -> Dict[str, Path]: +def get_test_case_paths(calculation_type: str, test_cases: 
Optional[list[str]] = None) -> dict[str, Path]: """get a list of all cases, directories in validation datasets""" calculation_type_dir = DATA_PATH / calculation_type test_case_paths = { @@ -173,7 +173,7 @@ def _add_cases(case_dir: Path, calculation_type: str, **kwargs): ) -def pytest_cases(get_batch_cases: bool = False, data_dir: Optional[str] = None, test_cases: Optional[List[str]] = None): +def pytest_cases(get_batch_cases: bool = False, data_dir: Optional[str] = None, test_cases: Optional[list[str]] = None): if data_dir is not None: relevant_calculations = [data_dir] else: @@ -198,7 +198,7 @@ def bool_params(true_id: str, false_id: Optional[str] = None, **kwargs): yield pytest.param(True, **kwargs, id=true_id) -def dict_params(params: Dict[Any, str], **kwargs): +def dict_params(params: dict[Any, str], **kwargs): for value, param_id in params.items(): yield pytest.param(value, **kwargs, id=param_id) @@ -224,7 +224,7 @@ def save_json_data(json_file: str, data: Dataset): json_serialize_to_file(data_file, data) -def compare_result(actual: SingleDataset, expected: SingleDataset, rtol: float, atol: Union[float, Dict[str, float]]): +def compare_result(actual: SingleDataset, expected: SingleDataset, rtol: float, atol: float | dict[str, float]): for key, expected_data in expected.items(): for col_name in expected_data.dtype.names: actual_col = actual[key][col_name] diff --git a/tests/unit/validation/test_batch_validation.py b/tests/unit/validation/test_batch_validation.py index cacf35a55..9047e2e61 100644 --- a/tests/unit/validation/test_batch_validation.py +++ b/tests/unit/validation/test_batch_validation.py @@ -2,7 +2,6 @@ # # SPDX-License-Identifier: MPL-2.0 -from typing import Dict import numpy as np import pytest @@ -13,7 +12,7 @@ @pytest.fixture -def input_data() -> Dict[str, np.ndarray]: +def input_data() -> dict[str, np.ndarray]: node = initialize_array("input", "node", 4) node["id"] = [1, 2, 3, 4] node["u_rated"] = 10.5e3 @@ -42,7 +41,7 @@ def input_data() -> Dict[str, np.ndarray]: @pytest.fixture -def batch_data() -> Dict[str, np.ndarray]: +def batch_data() -> dict[str, np.ndarray]: line = initialize_array("update", "line", (3, 2)) line["id"] = [[5, 6], [6, 7], [7, 5]] line["from_status"] = [[1, 1], [1, 1], [1, 1]] diff --git a/tests/unit/validation/test_input_validation.py b/tests/unit/validation/test_input_validation.py index 7a02be608..4e610fc7b 100644 --- a/tests/unit/validation/test_input_validation.py +++ b/tests/unit/validation/test_input_validation.py @@ -2,7 +2,6 @@ # # SPDX-License-Identifier: MPL-2.0 -from typing import Dict import numpy as np import pytest @@ -40,7 +39,7 @@ @pytest.fixture -def input_data() -> Dict[ComponentType, np.ndarray]: +def input_data() -> dict[ComponentType, np.ndarray]: node = initialize_array(DatasetType.input, ComponentType.node, 4) node["id"] = [0, 2, 1, 2] node["u_rated"] = [10.5e3, 10.5e3, 0, 10.5e3] diff --git a/tests/unit/validation/test_validation_functions.py b/tests/unit/validation/test_validation_functions.py index 078dd2b0e..2023cefbd 100644 --- a/tests/unit/validation/test_validation_functions.py +++ b/tests/unit/validation/test_validation_functions.py @@ -3,7 +3,6 @@ # SPDX-License-Identifier: MPL-2.0 from itertools import product -from typing import List, Union from unittest.mock import ANY, MagicMock, patch import numpy as np @@ -640,7 +639,7 @@ def test_validate_generic_power_sensor__all_terminal_types( @patch("power_grid_model.validation.validation.all_valid_enum_values", new=MagicMock()) 
@patch("power_grid_model.validation.validation.all_valid_ids") def test_validate_generic_power_sensor__terminal_types( - all_valid_ids: MagicMock, ref_component: Union[str, List[str]], measured_terminal_type: MeasuredTerminalType + all_valid_ids: MagicMock, ref_component: str | list[str], measured_terminal_type: MeasuredTerminalType ): # Act validate_generic_power_sensor(data={}, component="") # type: ignore From 5243d1c15467a8069edb36ab38ef2704112c4ce9 Mon Sep 17 00:00:00 2001 From: Zhen Wang Date: Mon, 5 Aug 2024 09:54:14 +0200 Subject: [PATCH 2/3] Double check Signed-off-by: Zhen Wang --- docs/examples/Validation Examples.ipynb | 4 ++-- docs/user_manual/data-validator.md | 4 ++-- setup.py | 1 - src/power_grid_model/core/data_handling.py | 4 ++-- src/power_grid_model/core/power_grid_model.py | 14 +++++++------- src/power_grid_model/validation/errors.py | 4 ++-- tests/unit/test_0Z_model_validation.py | 4 ++-- 7 files changed, 17 insertions(+), 18 deletions(-) diff --git a/docs/examples/Validation Examples.ipynb b/docs/examples/Validation Examples.ipynb index aecc4c70f..02e1654b9 100644 --- a/docs/examples/Validation Examples.ipynb +++ b/docs/examples/Validation Examples.ipynb @@ -45,10 +45,10 @@ " component: ComponentType | list[ComponentType]\n", " \n", " # Field(s): e.g. \"id\" or [\"line_from\", \"line_to\"] or [(ComponentType.node, \"id\"), (ComponentType.line, \"id\")]\n", - " field: str | list[str] | list[Tuple[ComponentType, str]]\n", + " field: str | list[str] | list[tuple[ComponentType, str]]\n", "\n", " # IDs: e.g. [1, 2, 3] or [(ComponentType.node, 1), (ComponentType.line, 1)]\n", - " ids: list[int] | list[Tuple[ComponentType, int]] = [] \n", + " ids: list[int] | list[tuple[ComponentType, int]] = [] \n", " \n", "```\n", "\n", diff --git a/docs/user_manual/data-validator.md b/docs/user_manual/data-validator.md index 2fa480225..0624d7359 100644 --- a/docs/user_manual/data-validator.md +++ b/docs/user_manual/data-validator.md @@ -36,10 +36,10 @@ class ValidationError: component: str | list[str] # Field(s): e.g. "id" or ["line_from", "line_to"] or [("node", "id"), ("line", "id")] - field: str | list[str] | list[Tuple[str, str]] + field: str | list[str] | list[tuple[str, str]] # IDs: e.g. 
[1, 2, 3] or [("node", 1), ("line", 1)] - ids: list[int] | list[Tuple[str, int]] = [] + ids: list[int] | list[tuple[str, int]] = [] ``` ## Validation functions diff --git a/setup.py b/setup.py index b5b85e687..91d3544df 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,6 @@ import shutil from itertools import chain from pathlib import Path -from typing import List # noinspection PyPackageRequirements from setuptools import Extension, setup diff --git a/src/power_grid_model/core/data_handling.py b/src/power_grid_model/core/data_handling.py index 33608d1e8..039517d6f 100644 --- a/src/power_grid_model/core/data_handling.py +++ b/src/power_grid_model/core/data_handling.py @@ -8,7 +8,7 @@ from enum import Enum -from typing import Mapping, Tuple +from typing import Mapping import numpy as np @@ -137,7 +137,7 @@ def create_output_data( for name, count in all_component_count.items(): # shape if is_batch: - shape: Tuple[int] | Tuple[int, int] = (batch_size, count) + shape: tuple[int] | tuple[int, int] = (batch_size, count) else: shape = (count,) result_dict[name] = initialize_array(output_type.value, name, shape=shape, empty=True) diff --git a/src/power_grid_model/core/power_grid_model.py b/src/power_grid_model/core/power_grid_model.py index ab0687bb9..0d7645231 100644 --- a/src/power_grid_model/core/power_grid_model.py +++ b/src/power_grid_model/core/power_grid_model.py @@ -6,7 +6,7 @@ Main power grid model class """ from enum import IntEnum -from typing import Optional, Set, Type +from typing import Optional, Type import numpy as np @@ -315,7 +315,7 @@ def _calculate_power_flow( calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson, update_data: Optional[Dataset] = None, threading: int = -1, - output_component_types: Optional[Set[ComponentType] | list[ComponentType]] = None, + output_component_types: Optional[set[ComponentType] | list[ComponentType]] = None, continue_on_batch_error: bool = False, decode_error: bool = True, tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled, @@ -352,7 +352,7 @@ def _calculate_state_estimation( calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear, update_data: Optional[Dataset] = None, threading: int = -1, - output_component_types: Optional[Set[ComponentType] | list[ComponentType]] = None, + output_component_types: Optional[set[ComponentType] | list[ComponentType]] = None, continue_on_batch_error: bool = False, decode_error: bool = True, experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled, @@ -384,7 +384,7 @@ def _calculate_short_circuit( calculation_method: CalculationMethod | str = CalculationMethod.iec60909, update_data: Optional[Dataset] = None, threading: int = -1, - output_component_types: Optional[Set[ComponentType] | list[ComponentType]] = None, + output_component_types: Optional[set[ComponentType] | list[ComponentType]] = None, continue_on_batch_error: bool = False, decode_error: bool = True, short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum, @@ -421,7 +421,7 @@ def calculate_power_flow( calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson, update_data: Optional[dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset] = None, threading: int = -1, - output_component_types: Optional[Set[ComponentType] | list[ComponentType]] = None, + output_component_types: Optional[set[ComponentType] | list[ComponentType]] = None, continue_on_batch_error: bool = False, decode_error: bool 
= True, tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled, @@ -510,7 +510,7 @@ def calculate_state_estimation( calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear, update_data: Optional[dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset] = None, threading: int = -1, - output_component_types: Optional[Set[ComponentType] | list[ComponentType]] = None, + output_component_types: Optional[set[ComponentType] | list[ComponentType]] = None, continue_on_batch_error: bool = False, decode_error: bool = True, ) -> dict[ComponentType, np.ndarray]: @@ -591,7 +591,7 @@ def calculate_short_circuit( calculation_method: CalculationMethod | str = CalculationMethod.iec60909, update_data: Optional[dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset] = None, threading: int = -1, - output_component_types: Optional[Set[ComponentType] | list[ComponentType]] = None, + output_component_types: Optional[set[ComponentType] | list[ComponentType]] = None, continue_on_batch_error: bool = False, decode_error: bool = True, short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum, diff --git a/src/power_grid_model/validation/errors.py b/src/power_grid_model/validation/errors.py index 10586e3c8..e39b2a675 100644 --- a/src/power_grid_model/validation/errors.py +++ b/src/power_grid_model/validation/errors.py @@ -8,7 +8,7 @@ import re from abc import ABC from enum import Enum -from typing import Any, Iterable, Optional, Set, Type +from typing import Any, Iterable, Optional, Type from power_grid_model import ComponentType @@ -205,7 +205,7 @@ class NotIdenticalError(SingleFieldValidationError): _message = "Field {field} is not unique for {n} {objects}: {num_unique} different values." values: list[Any] - unique: Set[Any] + unique: set[Any] num_unique: int def __init__(self, component: ComponentType, field: str, ids: Iterable[int], values: list[Any]): diff --git a/tests/unit/test_0Z_model_validation.py b/tests/unit/test_0Z_model_validation.py index db831e94f..74e3279ab 100644 --- a/tests/unit/test_0Z_model_validation.py +++ b/tests/unit/test_0Z_model_validation.py @@ -4,7 +4,7 @@ from copy import copy from pathlib import Path -from typing import Callable, Tuple +from typing import Callable import numpy as np import pytest @@ -14,7 +14,7 @@ from .utils import EXPORT_OUTPUT, PowerGridModelWithExt, compare_result, import_case_data, pytest_cases, save_json_data -calculation_function_arguments_map: dict[str, Tuple[Callable, list[str]]] = { +calculation_function_arguments_map: dict[str, tuple[Callable, list[str]]] = { "power_flow": ( PowerGridModelWithExt.calculate_power_flow_with_ext, [ From c5491a0aedb74e86e2224381b0fd1e648aaad122 Mon Sep 17 00:00:00 2001 From: Martijn Govers Date: Mon, 5 Aug 2024 10:53:56 +0200 Subject: [PATCH 3/3] remove remaining unused import Signed-off-by: Martijn Govers --- tests/unit/test_utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 572fc7679..c7c26a8b1 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -3,7 +3,6 @@ # SPDX-License-Identifier: MPL-2.0 from pathlib import Path -from typing import Dict from unittest.mock import MagicMock, mock_open, patch import numpy as np
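
Note on the pattern applied across all three patches: the rewrite is purely notational. typing.Union becomes the PEP 604 "|" operator, and typing.List/Dict/Tuple/Set become the PEP 585 builtin generics, while Optional[...] is deliberately kept. The following is a minimal, self-contained sketch of the before/after spelling; the function names and parameters are hypothetical illustrations, not taken from the power-grid-model code base.

from typing import Dict, List, Optional, Union

import numpy as np


def old_style(
    ids: List[int],
    ref_value: Union[int, float, str],
    default_value: Optional[Union[np.ndarray, int, float]] = None,
) -> Dict[str, float]:
    # Old spelling: every container and union comes from the typing module.
    return {}


def new_style(
    ids: list[int],
    ref_value: int | float | str,
    default_value: Optional[np.ndarray | int | float] = None,
) -> dict[str, float]:
    # New spelling: builtin generics (PEP 585) and the "|" union (PEP 604);
    # Optional[...] is kept as-is, matching the patches above.
    return {}


# Both spellings produce equivalent types at runtime:
assert int | None == Optional[int]
assert old_style.__annotations__["ref_value"] == new_style.__annotations__["ref_value"]

Because these annotations are evaluated at function definition time, the new spellings set a floor on the supported interpreter: builtin generics such as list[int] require Python 3.9, and "X | Y" in evaluated positions requires Python 3.10. Optional[X] is exactly equivalent to X | None, so keeping it, as this series does, is a stylistic choice rather than a semantic one.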