diff --git a/code_generation/meta_data.py b/code_generation/meta_data.py
index 17b426c7a..3564e2276 100644
--- a/code_generation/meta_data.py
+++ b/code_generation/meta_data.py
@@ -5,7 +5,6 @@
 # define dataclass for meta data
 
 from dataclasses import dataclass
-from typing import Optional
 
 from dataclasses_json import DataClassJsonMixin
 
@@ -15,19 +14,19 @@ class Attribute(DataClassJsonMixin):
     data_type: str
     names: str | list[str]
     description: str
-    nan_value: Optional[str] = None
+    nan_value: str | None = None
 
 
 @dataclass
 class AttributeClass(DataClassJsonMixin):
     name: str
     attributes: list[Attribute]
-    full_attributes: Optional[list[Attribute]] = None
-    base: Optional[str] = None
+    full_attributes: list[Attribute] | None = None
+    base: str | None = None
     is_template: bool = False
-    full_name: Optional[str] = None
-    specification_names: Optional[list[str]] = None
-    base_attributes: Optional[dict[str, list[Attribute]]] = None
+    full_name: str | None = None
+    specification_names: list[str] | None = None
+    base_attributes: dict[str, list[Attribute]] | None = None
 
 
 @dataclass
diff --git a/src/power_grid_model/_core/error_handling.py b/src/power_grid_model/_core/error_handling.py
index 98e231361..758156c97 100644
--- a/src/power_grid_model/_core/error_handling.py
+++ b/src/power_grid_model/_core/error_handling.py
@@ -7,7 +7,6 @@
 """
 
 import re
-from typing import Optional
 
 import numpy as np
 
@@ -115,7 +114,7 @@ def _interpret_error(message: str, decode_error: bool = True) -> PowerGridError:
     return PowerGridError(message)
 
 
-def find_error(batch_size: int = 1, decode_error: bool = True) -> Optional[RuntimeError]:
+def find_error(batch_size: int = 1, decode_error: bool = True) -> RuntimeError | None:
     """
     Check if there is an error and return it
 
@@ -171,7 +170,7 @@ def assert_no_error(batch_size: int = 1, decode_error: bool = True):
 
 def handle_errors(
     continue_on_batch_error: bool, batch_size: int = 1, decode_error: bool = True
-) -> Optional[PowerGridBatchError]:
+) -> PowerGridBatchError | None:
     """
     Handle any errors in the way that is specified.
 
@@ -184,10 +183,10 @@ def handle_errors(
         error: Any errors previously encountered, unless it was a batch error and continue_on_batch_error was
             True.
 
     Returns:
-        Optional[PowerGridBatchError]: None if there were no errors, or the previously encountered
-            error if it was a batch error and continue_on_batch_error was True.
+        PowerGridBatchError | None: None if there were no errors, or the previously encountered
+            error if it was a batch error and continue_on_batch_error was True.
     """
-    error: Optional[RuntimeError] = find_error(batch_size=batch_size, decode_error=decode_error)
+    error: RuntimeError | None = find_error(batch_size=batch_size, decode_error=decode_error)
     if error is None:
         return None
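Reviewer note: on Python 3.10+ the PEP 604 spelling `X | None` builds a union object that compares equal to `typing.Optional[X]`, so every hunk in this patch is a pure notation change. A minimal sketch (the stub function below is illustrative, not part of the codebase):

```python
from typing import Optional

# PEP 604 unions compare equal to their typing.Optional counterparts,
# so annotations such as find_error's new return type mean the same thing.
assert (RuntimeError | None) == Optional[RuntimeError]


def find_error_stub(ok: bool) -> RuntimeError | None:
    # Mirrors the contract of find_error() above: None signals "no error".
    return None if ok else RuntimeError("calculation failed")


assert find_error_stub(True) is None
assert isinstance(find_error_stub(False), RuntimeError)
```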
""" - error: Optional[RuntimeError] = find_error(batch_size=batch_size, decode_error=decode_error) + error: RuntimeError | None = find_error(batch_size=batch_size, decode_error=decode_error) if error is None: return None diff --git a/src/power_grid_model/_core/power_grid_core.py b/src/power_grid_model/_core/power_grid_core.py index 1f1d5f06b..5495e749b 100644 --- a/src/power_grid_model/_core/power_grid_core.py +++ b/src/power_grid_model/_core/power_grid_core.py @@ -11,7 +11,7 @@ from inspect import signature from itertools import chain from pathlib import Path -from typing import Callable, Optional +from typing import Callable from power_grid_model._core.index_integer import IdC, IdxC @@ -206,7 +206,7 @@ class PowerGridCore: """ _handle: HandlePtr - _instance: Optional["PowerGridCore"] = None + _instance: "PowerGridCore | None" = None # singleton of power grid core def __new__(cls, *args, **kwargs): diff --git a/src/power_grid_model/_core/power_grid_dataset.py b/src/power_grid_model/_core/power_grid_dataset.py index aa23052e8..92285cb8d 100644 --- a/src/power_grid_model/_core/power_grid_dataset.py +++ b/src/power_grid_model/_core/power_grid_dataset.py @@ -6,7 +6,7 @@ Power grid model raw dataset handler """ -from typing import Any, Mapping, Optional +from typing import Any, Mapping from power_grid_model._core.buffer_handling import ( BufferProperties, @@ -283,7 +283,7 @@ class CConstDataset: _const_dataset: ConstDatasetPtr _buffer_views: list[CBuffer] - def __new__(cls, data: Dataset, dataset_type: Optional[DatasetType] = None): + def __new__(cls, data: Dataset, dataset_type: DatasetType | None = None): instance = super().__new__(cls) instance._const_dataset = ConstDatasetPtr() diff --git a/src/power_grid_model/_core/power_grid_model.py b/src/power_grid_model/_core/power_grid_model.py index cc60f6864..d4ce2eff1 100644 --- a/src/power_grid_model/_core/power_grid_model.py +++ b/src/power_grid_model/_core/power_grid_model.py @@ -7,7 +7,7 @@ """ from enum import IntEnum -from typing import Optional, Type +from typing import Type import numpy as np @@ -45,11 +45,11 @@ class PowerGridModel: """ _model_ptr: ModelPtr - _all_component_count: Optional[dict[ComponentType, int]] - _batch_error: Optional[PowerGridBatchError] + _all_component_count: dict[ComponentType, int] | None + _batch_error: PowerGridBatchError | None @property - def batch_error(self) -> Optional[PowerGridBatchError]: + def batch_error(self) -> PowerGridBatchError | None: """ Get the batch error object, if present @@ -242,7 +242,7 @@ def _calculate_impl( # pylint: disable=too-many-positional-arguments self, calculation_type: CalculationType, symmetric: bool, - update_data: Optional[Dataset], + update_data: Dataset | None, output_component_types: ComponentAttributeMapping, options: Options, continue_on_batch_error: bool, @@ -310,7 +310,7 @@ def _calculate_power_flow( error_tolerance: float = 1e-8, max_iterations: int = 20, calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson, - update_data: Optional[Dataset] = None, + update_data: Dataset | None = None, threading: int = -1, output_component_types: ComponentAttributeMapping = None, continue_on_batch_error: bool = False, @@ -347,7 +347,7 @@ def _calculate_state_estimation( error_tolerance: float = 1e-8, max_iterations: int = 20, calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear, - update_data: Optional[Dataset] = None, + update_data: Dataset | None = None, threading: int = -1, output_component_types: ComponentAttributeMapping = 
diff --git a/src/power_grid_model/_core/power_grid_model.py b/src/power_grid_model/_core/power_grid_model.py
index cc60f6864..d4ce2eff1 100644
--- a/src/power_grid_model/_core/power_grid_model.py
+++ b/src/power_grid_model/_core/power_grid_model.py
@@ -7,7 +7,7 @@
 """
 
 from enum import IntEnum
-from typing import Optional, Type
+from typing import Type
 
 import numpy as np
 
@@ -45,11 +45,11 @@ class PowerGridModel:
     """
 
     _model_ptr: ModelPtr
-    _all_component_count: Optional[dict[ComponentType, int]]
-    _batch_error: Optional[PowerGridBatchError]
+    _all_component_count: dict[ComponentType, int] | None
+    _batch_error: PowerGridBatchError | None
 
     @property
-    def batch_error(self) -> Optional[PowerGridBatchError]:
+    def batch_error(self) -> PowerGridBatchError | None:
         """
         Get the batch error object, if present
 
@@ -242,7 +242,7 @@ def _calculate_impl(  # pylint: disable=too-many-positional-arguments
         self,
         calculation_type: CalculationType,
         symmetric: bool,
-        update_data: Optional[Dataset],
+        update_data: Dataset | None,
         output_component_types: ComponentAttributeMapping,
         options: Options,
         continue_on_batch_error: bool,
@@ -310,7 +310,7 @@ def _calculate_power_flow(
         error_tolerance: float = 1e-8,
         max_iterations: int = 20,
         calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson,
-        update_data: Optional[Dataset] = None,
+        update_data: Dataset | None = None,
         threading: int = -1,
         output_component_types: ComponentAttributeMapping = None,
         continue_on_batch_error: bool = False,
@@ -347,7 +347,7 @@ def _calculate_state_estimation(
         error_tolerance: float = 1e-8,
         max_iterations: int = 20,
         calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear,
-        update_data: Optional[Dataset] = None,
+        update_data: Dataset | None = None,
         threading: int = -1,
         output_component_types: ComponentAttributeMapping = None,
         continue_on_batch_error: bool = False,
@@ -379,7 +379,7 @@ def _calculate_short_circuit(
         self,
         *,
         calculation_method: CalculationMethod | str = CalculationMethod.iec60909,
-        update_data: Optional[Dataset] = None,
+        update_data: Dataset | None = None,
         threading: int = -1,
         output_component_types: ComponentAttributeMapping = None,
         continue_on_batch_error: bool = False,
@@ -416,7 +416,7 @@ def calculate_power_flow(
         error_tolerance: float = 1e-8,
         max_iterations: int = 20,
         calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson,
-        update_data: Optional[dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset] = None,
+        update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
         threading: int = -1,
         output_component_types: ComponentAttributeMapping = None,
         continue_on_batch_error: bool = False,
@@ -514,7 +514,7 @@ def calculate_state_estimation(
         error_tolerance: float = 1e-8,
         max_iterations: int = 20,
         calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear,
-        update_data: Optional[dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset] = None,
+        update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
         threading: int = -1,
         output_component_types: ComponentAttributeMapping = None,
         continue_on_batch_error: bool = False,
@@ -604,7 +604,7 @@ def calculate_short_circuit(
         self,
         *,
         calculation_method: CalculationMethod | str = CalculationMethod.iec60909,
-        update_data: Optional[dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset] = None,
+        update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
         threading: int = -1,
         output_component_types: ComponentAttributeMapping = None,
         continue_on_batch_error: bool = False,
diff --git a/src/power_grid_model/_utils.py b/src/power_grid_model/_utils.py
index 6f241fb2a..4dfbfeb54 100644
--- a/src/power_grid_model/_utils.py
+++ b/src/power_grid_model/_utils.py
@@ -11,7 +11,7 @@
 """
 
 from copy import deepcopy
-from typing import Optional, Sequence, cast
+from typing import Sequence, cast
 
 import numpy as np
 
@@ -218,8 +218,6 @@ def _split_numpy_array_in_batches(
     Args:
         data: A 1D or 2D Numpy structured array. A 1D array is a single table / batch,
             a 2D array is a batch per table.
-        component: The name of the component to which the data belongs; only used for errors.
-        attribute [optional]: The name of the attribute to which the data belongs; only used for errors.
 
     Returns:
         A list with a single numpy structured array per batch
@@ -239,7 +237,6 @@ def split_dense_batch_data_in_batches(
     Args:
         data: A 1D or 2D Numpy structured array. A 1D array is a single table / batch,
             a 2D array is a batch per table.
-        component: The name of the component to which the data belongs, only used for errors.
         batch_size: size of batch
 
     Returns:
@@ -326,7 +323,7 @@ def convert_dataset_to_python_dataset(data: Dataset) -> PythonDataset:
 
     # Check if the dataset is a single dataset or batch dataset
     # It is batch dataset if it is 2D array or a indptr/data structure
-    is_batch: Optional[bool] = None
+    is_batch: bool | None = None
     for component, array in data.items():
         is_dense_batch = isinstance(array, np.ndarray) and array.ndim == 2
         is_sparse_batch = isinstance(array, dict) and "indptr" in array and "data" in array
diff --git a/src/power_grid_model/utils.py b/src/power_grid_model/utils.py
index c724e0ca9..400f444aa 100644
--- a/src/power_grid_model/utils.py
+++ b/src/power_grid_model/utils.py
@@ -11,7 +11,7 @@
 import tempfile
 import warnings
 from pathlib import Path
-from typing import Optional, cast as cast_type
+from typing import cast as cast_type
 
 import numpy as np
 
@@ -142,9 +142,9 @@ def json_deserialize_from_file(
 def json_serialize_to_file(
     file_path: Path,
     data: Dataset,
-    dataset_type: Optional[DatasetType] = None,
+    dataset_type: DatasetType | None = None,
     use_compact_list: bool = False,
-    indent: Optional[int] = 2,
+    indent: int | None = 2,
 ):
     """
     Export JSON data in most recent format.
@@ -189,7 +189,7 @@ def msgpack_deserialize_from_file(
 
 def msgpack_serialize_to_file(
-    file_path: Path, data: Dataset, dataset_type: Optional[DatasetType] = None, use_compact_list: bool = False
+    file_path: Path, data: Dataset, dataset_type: DatasetType | None = None, use_compact_list: bool = False
 ):
     """
     Export msgpack data in most recent format.
@@ -234,7 +234,7 @@ def import_json_data(json_file: Path, data_type: str, *args, **kwargs) -> Datase
 
 def export_json_data(
-    json_file: Path, data: Dataset, indent: Optional[int] = 2, compact: bool = False, use_deprecated_format: bool = True
+    json_file: Path, data: Dataset, indent: int | None = 2, compact: bool = False, use_deprecated_format: bool = True
 ):
     """
     [deprecated] Export json data in a deprecated serialization format.
@@ -268,7 +268,7 @@ def export_json_data(
 
 def _compatibility_deprecated_export_json_data(
-    json_file: Path, data: Dataset, indent: Optional[int] = 2, compact: bool = False
+    json_file: Path, data: Dataset, indent: int | None = 2, compact: bool = False
 ):
     serialized_data = json_serialize(data=data, use_compact_list=compact, indent=-1 if indent is None else indent)
     old_format_serialized_data = json.dumps(json.loads(serialized_data)["data"])
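Note on the serialization helpers: `indent: int | None = 2` now states directly that `None` means "no indentation", and `_compatibility_deprecated_export_json_data` shows how that is still translated to the `-1` sentinel the core serializer expects. A sketch of that convention (the helper name is made up for illustration):

```python
def _normalize_indent(indent: int | None) -> int:
    # Same mapping as in utils.py above: None ("no indentation") becomes
    # the -1 sentinel passed to json_serialize.
    return -1 if indent is None else indent


assert _normalize_indent(None) == -1
assert _normalize_indent(2) == 2
```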
""" -from typing import Optional - from power_grid_model.data_types import BatchDataset, SingleDataset from power_grid_model.enum import CalculationType from power_grid_model.validation.errors import ValidationError @@ -31,7 +29,7 @@ def __str__(self): def assert_valid_input_data( - input_data: SingleDataset, calculation_type: Optional[CalculationType] = None, symmetric: bool = True + input_data: SingleDataset, calculation_type: CalculationType | None = None, symmetric: bool = True ): """ Validates the entire input dataset: @@ -60,7 +58,7 @@ def assert_valid_input_data( def assert_valid_batch_data( input_data: SingleDataset, update_data: BatchDataset, - calculation_type: Optional[CalculationType] = None, + calculation_type: CalculationType | None = None, symmetric: bool = True, ): """ diff --git a/src/power_grid_model/validation/errors.py b/src/power_grid_model/validation/errors.py index 57c8488aa..840f1794e 100644 --- a/src/power_grid_model/validation/errors.py +++ b/src/power_grid_model/validation/errors.py @@ -8,7 +8,7 @@ import re from abc import ABC from enum import Enum -from typing import Any, Iterable, Optional, Type +from typing import Any, Iterable, Type from power_grid_model import ComponentType @@ -32,18 +32,18 @@ class ValidationError(ABC): """ - component: Optional[ComponentType | list[ComponentType]] = None + component: ComponentType | list[ComponentType] | None = None """ The component, or components, to which the error applies. """ - field: Optional[str | list[str] | list[tuple[ComponentType, str]]] = None + field: str | list[str] | list[tuple[ComponentType, str]] | None = None """ The field, or fields, to which the error applies. A field can also be a tuple (component, field) when multiple components are being addressed. """ - ids: Optional[list[int] | list[tuple[ComponentType, int]]] = None + ids: list[int] | list[tuple[ComponentType, int]] | None = None """ The object identifiers to which the error applies. A field object identifier can also be a tuple (component, id) when multiple components are being addressed. @@ -79,7 +79,7 @@ def _unpack(field: str | tuple[ComponentType, str]) -> str: return self._delimiter.join(_unpack(field) for field in self.field) return _unpack(self.field) if self.field else str(self.field) - def get_context(self, id_lookup: Optional[list[str] | dict[int, str]] = None) -> dict[str, Any]: + def get_context(self, id_lookup: list[str] | dict[int, str] | None = None) -> dict[str, Any]: """ Returns a dictionary that supplies (human readable) information about this error. Each member variable is included in the dictionary. If a function {field_name}_str() exists, the value is overwritten by that function. @@ -129,9 +129,9 @@ class SingleFieldValidationError(ValidationError): _message = "Field {field} is not valid for {n} {objects}." 
diff --git a/src/power_grid_model/validation/rules.py b/src/power_grid_model/validation/rules.py
index b506fe1ad..6f603b4a2 100644
--- a/src/power_grid_model/validation/rules.py
+++ b/src/power_grid_model/validation/rules.py
@@ -35,7 +35,7 @@
 """
 
 from enum import Enum
-from typing import Any, Callable, Optional, Type, TypeVar
+from typing import Any, Callable, Type, TypeVar
 
 import numpy as np
 
@@ -103,7 +103,7 @@ def all_greater_than_or_equal_to_zero(
     data: SingleDataset,
     component: ComponentType,
     field: str,
-    default_value: Optional[np.ndarray | int | float] = None,
+    default_value: np.ndarray | int | float | None = None,
 ) -> list[NotGreaterOrEqualError]:
     """
     Check that for all records of a particular type of component, the values in the 'field' column are greater than,
@@ -113,7 +113,7 @@ def all_greater_than_or_equal_to_zero(
         data (SingleDataset): The input/update data set for all components
         component (ComponentType) The component of interest
         field (str): The field of interest
-        default_value (Optional[np.ndarray | int | float], optional): Some values are not required, but will
+        default_value (np.ndarray | int | float | None, optional): Some values are not required, but will
             receive a default value in the C++ core. To do a proper input validation, these default values should
             be included in the validation. It can be a fixed value for the entire column (int/float) or be
             different for each element (np.ndarray).
@@ -156,7 +156,7 @@ def all_greater_or_equal(
     component: ComponentType,
     field: str,
     ref_value: int | float | str,
-    default_value: Optional[np.ndarray | int | float] = None,
+    default_value: np.ndarray | int | float | None = None,
 ) -> list[NotGreaterOrEqualError]:
     """
     Check that for all records of a particular type of component, the values in the 'field' column are greater than,
@@ -249,7 +249,7 @@ def all_between(  # pylint: disable=too-many-positional-arguments
     field: str,
     ref_value_1: int | float | str,
     ref_value_2: int | float | str,
-    default_value: Optional[np.ndarray | int | float] = None,
+    default_value: np.ndarray | int | float | None = None,
 ) -> list[NotBetweenError]:
     """
     Check that for all records of a particular type of component, the values in the 'field' column are (exclusively)
@@ -290,8 +290,8 @@ def all_between_or_at(  # pylint: disable=too-many-positional-arguments
     field: str,
     ref_value_1: int | float | str,
     ref_value_2: int | float | str,
-    default_value_1: Optional[np.ndarray | int | float] = None,
-    default_value_2: Optional[np.ndarray | int | float] = None,
+    default_value_1: np.ndarray | int | float | None = None,
+    default_value_2: np.ndarray | int | float | None = None,
 ) -> list[NotBetweenOrAtError]:
     """
     Check that for all records of a particular type of component, the values in the 'field' column are inclusively
@@ -341,8 +341,8 @@ def none_match_comparison(  # pylint: disable=too-many-arguments
     compare_fn: Callable,
     ref_value: ComparisonError.RefType,
     error: Type[CompError] = ComparisonError,  # type: ignore
-    default_value_1: Optional[np.ndarray | int | float] = None,
-    default_value_2: Optional[np.ndarray | int | float] = None,
+    default_value_1: np.ndarray | int | float | None = None,
+    default_value_2: np.ndarray | int | float | None = None,
 ) -> list[CompError]:
     # pylint: disable=too-many-positional-arguments
     """
@@ -719,7 +719,7 @@ def ids_valid_in_update_data_set(
     return []
 
 
-def all_finite(data: SingleDataset, exceptions: Optional[dict[ComponentType, list[str]]] = None) -> list[InfinityError]:
+def all_finite(data: SingleDataset, exceptions: dict[ComponentType, list[str]] | None = None) -> list[InfinityError]:
     """
     Check that for all records in all component, the values in all columns are finite value, i.e. float values other
     than inf, or -inf. Nan values are ignored, as in all other comparison functions. You can use non_missing() to
diff --git a/src/power_grid_model/validation/utils.py b/src/power_grid_model/validation/utils.py
index 2d2df85b8..247031e79 100644
--- a/src/power_grid_model/validation/utils.py
+++ b/src/power_grid_model/validation/utils.py
@@ -6,7 +6,7 @@
 Utilities used for validation. Only errors_to_string() is intended for end users.
 """
 import re
-from typing import Any, Optional, cast
+from typing import Any, cast
 
 import numpy as np
 
@@ -157,7 +157,7 @@ def errors_to_string(
     errors: list[ValidationError] | dict[int, list[ValidationError]] | None,
     name: str = "the data",
     details: bool = False,
-    id_lookup: Optional[list[str] | dict[int, str]] = None,
+    id_lookup: list[str] | dict[int, str] | None = None,
 ) -> str:
     """
     Convert a set of errors (list or dict) to a human readable string representation.
@@ -198,7 +198,7 @@ def _nan_type(component: ComponentTypeLike, field: str, data_type: DatasetType =
     return power_grid_meta_data[data_type][component].nans[field]
 
 
-def _get_indexer(source: np.ndarray, target: np.ndarray, default_value: Optional[int] = None) -> np.ndarray:
+def _get_indexer(source: np.ndarray, target: np.ndarray, default_value: int | None = None) -> np.ndarray:
     """
     Given array of values from a source and a target dataset.
     Find the position of each value in the target dataset in the context of the source dataset.
@@ -214,7 +214,7 @@ def _get_indexer(source: np.ndarray, target: np.ndarray, default_value: Optional
     Args:
         source: array of values in the source dataset
         target: array of values in the target dataset
-        default_value: Optional. the default index to provide for target values not in source
+        default_value (optional): the default index to provide for target values not in source
 
     Returns:
         np.ndarray: array of positions of the values from target dataset in the source dataset
diff --git a/src/power_grid_model/validation/validation.py b/src/power_grid_model/validation/validation.py
index 10dba4a2e..5e9fca790 100644
--- a/src/power_grid_model/validation/validation.py
+++ b/src/power_grid_model/validation/validation.py
@@ -14,7 +14,7 @@
 import copy
 from collections.abc import Sized as ABCSized
 from itertools import chain
-from typing import Optional, cast
+from typing import cast
 
 import numpy as np
 
@@ -69,8 +69,8 @@ def validate_input_data(
-    input_data: SingleDataset, calculation_type: Optional[CalculationType] = None, symmetric: bool = True
-) -> Optional[list[ValidationError]]:
+    input_data: SingleDataset, calculation_type: CalculationType | None = None, symmetric: bool = True
+) -> list[ValidationError] | None:
     """
     Validates the entire input dataset:
@@ -108,9 +108,9 @@ def validate_batch_data(
     input_data: SingleDataset,
     update_data: BatchDataset,
-    calculation_type: Optional[CalculationType] = None,
+    calculation_type: CalculationType | None = None,
     symmetric: bool = True,
-) -> Optional[dict[int, list[ValidationError]]]:
+) -> dict[int, list[ValidationError]] | None:
     """
     The input dataset is validated:
@@ -285,7 +285,7 @@ def _process_power_sigma_in_list(_sensor_mask, _power_sigma, _p_sigma, _q_sigma)
 
 
 def validate_required_values(
-    data: SingleDataset, calculation_type: Optional[CalculationType] = None, symmetric: bool = True
+    data: SingleDataset, calculation_type: CalculationType | None = None, symmetric: bool = True
 ) -> list[MissingValueError]:
     """
     Checks if all required data is available.
@@ -451,7 +451,7 @@ def process_nested_items(component, items, data, results):
     return list(chain(*results))
 
 
-def validate_values(data: SingleDataset, calculation_type: Optional[CalculationType] = None) -> list[ValidationError]:
+def validate_values(data: SingleDataset, calculation_type: CalculationType | None = None) -> list[ValidationError]:
     """
     For each component supplied in the data, call the appropriate validation function
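Since `validate_input_data` and `validate_batch_data` return `None` for valid data, callers still need an explicit `None` check before iterating; the new `list[ValidationError] | None` annotation just spells out that contract. A usage sketch (assuming the public `power_grid_model.validation` exports; `input_data` is supplied by the caller):

```python
from power_grid_model.validation import errors_to_string, validate_input_data


def input_data_is_valid(input_data) -> bool:
    # None means "no errors found"; a non-empty list means invalid data.
    errors = validate_input_data(input_data, symmetric=True)
    if errors is None:
        return True
    print(errors_to_string(errors, name="input_data", details=True))
    return False
```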
diff --git a/tests/unit/test_serialization.py b/tests/unit/test_serialization.py
index e7015bcba..c805c3881 100644
--- a/tests/unit/test_serialization.py
+++ b/tests/unit/test_serialization.py
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: MPL-2.0
 
 import json
-from typing import Any, Mapping, Optional
+from typing import Any, Mapping
 
 import msgpack
 import numpy as np
 
@@ -16,7 +16,7 @@
 from power_grid_model.utils import json_deserialize, json_serialize, msgpack_deserialize, msgpack_serialize
 
 
-def to_json(data, raw_buffer: bool = False, indent: Optional[int] = None):
+def to_json(data, raw_buffer: bool = False, indent: int | None = None):
     indent = None if indent is None or indent < 0 else indent
     separators = (",", ":") if indent is None else None
     result = json.dumps(data, indent=indent, separators=separators)
diff --git a/tests/unit/utils.py b/tests/unit/utils.py
index 5f1d19004..6de4a7429 100644
--- a/tests/unit/utils.py
+++ b/tests/unit/utils.py
@@ -6,7 +6,7 @@
 import os
 import re
 from pathlib import Path
-from typing import Any, Optional
+from typing import Any
 
 import numpy as np
 import pytest
 
@@ -93,7 +93,7 @@ def get_output_type(calculation_type: str, sym: bool) -> str:
     return "asym_output"
 
 
-def get_test_case_paths(calculation_type: str, test_cases: Optional[list[str]] = None) -> dict[str, Path]:
+def get_test_case_paths(calculation_type: str, test_cases: list[str] | None = None) -> dict[str, Path]:
     """get a list of all cases, directories in validation datasets"""
     calculation_type_dir = DATA_PATH / calculation_type
     test_case_paths = {
@@ -173,7 +173,7 @@ def _add_cases(case_dir: Path, calculation_type: str, **kwargs):
 )
 
 
-def pytest_cases(get_batch_cases: bool = False, data_dir: Optional[str] = None, test_cases: Optional[list[str]] = None):
+def pytest_cases(get_batch_cases: bool = False, data_dir: str | None = None, test_cases: list[str] | None = None):
     if data_dir is not None:
         relevant_calculations = [data_dir]
     else:
@@ -191,7 +191,7 @@ def pytest_cases(get_batch_cases: bool = False, data_dir: Optional[str] = None,
 )
 
 
-def bool_params(true_id: str, false_id: Optional[str] = None, **kwargs):
+def bool_params(true_id: str, false_id: str | None = None, **kwargs):
     if false_id is None:
         false_id = f"not-{true_id}"
     yield pytest.param(False, **kwargs, id=false_id)
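Finally, the rewritten annotations stay fully introspectable, which matters for anything built on `typing.get_type_hints` (for instance dataclasses_json in `meta_data.py`). A quick check (the stub mirrors the `bool_params` signature in `tests/unit/utils.py`; assumes Python >= 3.10, which this syntax requires at runtime):

```python
from typing import Optional, get_type_hints


def bool_params_stub(true_id: str, false_id: str | None = None) -> None:
    """Stand-in with the same annotation shape as bool_params above."""


hints = get_type_hints(bool_params_stub)
assert hints["false_id"] == Optional[str]  # PEP 604 union == typing.Optional
```

Rewrites of this kind can typically be automated (e.g. pyupgrade's `--py310-plus`, or Ruff's `UP007`/`UP045` rules), which is worth wiring into CI so `Optional` does not creep back in.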