Merge pull request #680 from PowerGridModel/feature/add-type-hinting
Add type hinting in python API
mgovers authored Aug 5, 2024
2 parents e22b528 + c5491a0 commit 7e01723
Showing 29 changed files with 286 additions and 339 deletions.
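
Every change in this commit follows the same two modernizations: PEP 585 builtin generics (`dict`, `list`, `tuple` instead of `typing.Dict`, `typing.List`, `typing.Tuple`) and PEP 604 union syntax (`X | Y` instead of `Union[X, Y]`), while `Optional[...]` is kept as-is. A minimal before/after sketch of the pattern (illustrative only, not taken from the diff):

```python
# Illustrative sketch of the pattern applied throughout this commit.
# PEP 585 (Python 3.9+): typing.Dict/List/Tuple -> builtin dict/list/tuple
# PEP 604 (Python 3.10+): Union[X, Y] -> X | Y
from typing import Dict, List, Optional, Union


def before(names: Union[str, List[str]]) -> Dict[str, Optional[int]]:
    return {n: None for n in ([names] if isinstance(names, str) else names)}


def after(names: str | list[str]) -> dict[str, Optional[int]]:
    return {n: None for n in ([names] if isinstance(names, str) else names)}
```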
code_generation/code_gen.py (5 changes: 2 additions & 3 deletions)

@@ -4,7 +4,6 @@
 from copy import deepcopy
 from pathlib import Path
-from typing import Dict, List

 from jinja2 import Environment, FileSystemLoader
 from meta_data import AllDatasetMapData, AttributeClass, DatasetMapData, DatasetMetaData
@@ -28,7 +27,7 @@ def _data_type_nan(data_type: str):


 class CodeGenerator:
-    all_classes: Dict[str, AttributeClass]
+    all_classes: dict[str, AttributeClass]
     base_output_path: Path

     def __init__(self, base_output_path: Path) -> None:
@@ -98,7 +97,7 @@ def render_attribute_classes(self, template_path: Path, data_path: Path, output_path: Path):
     def render_dataset_class_maps(self, template_path: Path, data_path: Path, output_path: Path):
         with open(data_path) as data_file:
             json_data = data_file.read()
-            dataset_meta_data: List[DatasetMapData] = AllDatasetMapData.schema().loads(json_data).all_datasets
+            dataset_meta_data: list[DatasetMapData] = AllDatasetMapData.schema().loads(json_data).all_datasets

             # create list
             all_map = {}
code_generation/meta_data.py (20 changes: 10 additions & 10 deletions)

@@ -5,51 +5,51 @@
 # define dataclass for meta data

 from dataclasses import dataclass
-from typing import Dict, List, Optional, Union
+from typing import Optional

 from dataclasses_json import DataClassJsonMixin


 @dataclass
 class Attribute(DataClassJsonMixin):
     data_type: str
-    names: Union[str, List[str]]
+    names: str | list[str]
     description: str
     nan_value: Optional[str] = None


 @dataclass
 class AttributeClass(DataClassJsonMixin):
     name: str
-    attributes: List[Attribute]
-    full_attributes: Optional[List[Attribute]] = None
+    attributes: list[Attribute]
+    full_attributes: Optional[list[Attribute]] = None
     base: Optional[str] = None
     is_template: bool = False
     full_name: Optional[str] = None
-    specification_names: Optional[List[str]] = None
-    base_attributes: Optional[Dict[str, List[Attribute]]] = None
+    specification_names: Optional[list[str]] = None
+    base_attributes: Optional[dict[str, list[Attribute]]] = None


 @dataclass
 class DatasetMetaData(DataClassJsonMixin):
     name: str
     include_guard: str
-    classes: List[AttributeClass]
+    classes: list[AttributeClass]


 @dataclass
 class ObjectMapData(DataClassJsonMixin):
-    names: List[str]
+    names: list[str]
     class_name: str


 @dataclass
 class DatasetMapData(DataClassJsonMixin):
     name: str
     is_template: bool
-    components: List[ObjectMapData]
+    components: list[ObjectMapData]


 @dataclass
 class AllDatasetMapData(DataClassJsonMixin):
-    all_datasets: List[DatasetMapData]
+    all_datasets: list[DatasetMapData]
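
These dataclasses are consumed by code_gen.py through dataclasses_json, as the `AllDatasetMapData.schema().loads(...)` call above shows. A minimal sketch of that round trip; the JSON payload below is invented for illustration, and the import assumes the `code_generation` directory is on the path:

```python
# Minimal sketch of how code_gen.py consumes these dataclasses through
# dataclasses_json; the JSON payload below is invented for illustration.
from meta_data import AllDatasetMapData

json_data = """
{
  "all_datasets": [
    {
      "name": "input",
      "is_template": false,
      "components": [{"names": ["node", "line"], "class_name": "Component"}]
    }
  ]
}
"""

# DataClassJsonMixin.schema() returns a marshmallow schema; loads() parses the
# JSON into typed dataclass instances, matching the annotations above.
all_map_data = AllDatasetMapData.schema().loads(json_data)
for dataset in all_map_data.all_datasets:  # list[DatasetMapData]
    print(dataset.name, [component.class_name for component in dataset.components])
```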
@@ -10,7 +10,7 @@
 {%- set components = all_map['input'].keys() %}

 from enum import Enum, EnumMeta
-from typing import Any, Dict, Mapping
+from typing import Any, Mapping

 # pylint: disable=invalid-name

@@ -68,7 +68,7 @@ def _str_to_datatype(data_type: Any) -> DatasetType:
     return data_type


-def _map_to_datatypes(data: Mapping[Any, Any]) -> Dict[DatasetType, Any]:
+def _map_to_datatypes(data: Mapping[Any, Any]) -> dict[DatasetType, Any]:
     """Helper function to map datatype str keys to DatasetType."""
     return {_str_to_datatype(key): value for key, value in data.items()}

@@ -80,7 +80,7 @@ def _str_to_component_type(component: Any) -> ComponentType:
     return component


-def _map_to_component_types(data: Mapping[Any, Any]) -> Dict[ComponentType, Any]:
+def _map_to_component_types(data: Mapping[Any, Any]) -> dict[ComponentType, Any]:
     """Helper function to map componenttype str keys to ComponentType."""
     return {_str_to_component_type(key): value for key, value in data.items()}
docs/examples/Validation Examples.ipynb (10 changes: 5 additions & 5 deletions)

@@ -21,8 +21,8 @@
 "# Manual validation\n",
 "# validate_input_data() assumes that you won't be using update data in your calculation.\n",
 "# validate_batch_data() validates input_data in combination with batch/update data.\n",
-"validate_input_data(input_data, calculation_type, symmetric) -> List[ValidationError]\n",
-"validate_batch_data(input_data, update_data, calculation_type, symmetric) -> Dict[int, List[ValidationError]]\n",
+"validate_input_data(input_data, calculation_type, symmetric) -> list[ValidationError]\n",
+"validate_batch_data(input_data, update_data, calculation_type, symmetric) -> dict[int, list[ValidationError]]\n",
 "\n",
 "# Assertions\n",
 "# assert_valid_input_data() and assert_valid_batch_data() raise a ValidationException,\n",
@@ -42,13 +42,13 @@
 "class ValidationError:\n",
 "    \n",
 "    # Component(s): e.g. ComponentType.node or [ComponentType.node, ComponentType.line]\n",
-"    component: Union[ComponentType, List[ComponentType]]\n",
+"    component: ComponentType | list[ComponentType]\n",
 "    \n",
 "    # Field(s): e.g. \"id\" or [\"line_from\", \"line_to\"] or [(ComponentType.node, \"id\"), (ComponentType.line, \"id\")]\n",
-"    field: Union[str, List[str], List[Tuple[ComponentType, str]]]\n",
+"    field: str | list[str] | list[tuple[ComponentType, str]]\n",
 "\n",
 "    # IDs: e.g. [1, 2, 3] or [(ComponentType.node, 1), (ComponentType.line, 1)]\n",
-"    ids: Union[List[int], List[Tuple[ComponentType, int]]] = []\n",
+"    ids: list[int] | list[tuple[ComponentType, int]] = []\n",
 "\n",
 "```\n",
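
A minimal sketch of consuming the errors described in this notebook; `input_data` is assumed to be a prepared power-grid-model input dataset. Note that `validate_input_data()` returns `None` when the data is valid:

```python
# A minimal sketch of consuming the ValidationError objects described above;
# input_data is assumed to be a prepared power-grid-model input dataset.
from power_grid_model import CalculationType
from power_grid_model.validation import validate_input_data

errors = validate_input_data(input_data, CalculationType.power_flow, symmetric=True)
for error in errors or []:  # validate_input_data() returns None when valid
    # component, field and ids may each be a single value or a list,
    # as shown in the dataclass definition above.
    print(error.component, error.field, error.ids)
```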
docs/user_manual/data-validator.md (14 changes: 7 additions & 7 deletions)

@@ -15,8 +15,8 @@
 Two helper type definitions are used throughout the validation functions, `InputData` and `UpdateData`. They are not
 special types or classes, but merely type hinting aliases:

 ```python
-InputData = Dict[str, np.ndarray]
-UpdateData = Dict[str, Union[np.ndarray, Dict[str, np.ndarray]]]
+InputData = dict[str, np.ndarray]
+UpdateData = dict[str, np.ndarray | dict[str, np.ndarray]]
 ```

 ```{seealso}
@@ -33,13 +33,13 @@ e.g. which object IDs are involved.

 ```python
 class ValidationError:

     # Component(s): e.g. "node" or ["node", "line"]
-    component: Union[str, List[str]]
+    component: str | list[str]

     # Field(s): e.g. "id" or ["line_from", "line_to"] or [("node", "id"), ("line", "id")]
-    field: Union[str, List[str], List[Tuple[str, str]]]
+    field: str | list[str] | list[tuple[str, str]]

     # IDs: e.g. [1, 2, 3] or [("node", 1), ("line", 1)]
-    ids: Union[List[int], List[Tuple[str, int]]] = []
+    ids: list[int] | list[tuple[str, int]] = []
 ```

 ## Validation functions
@@ -51,8 +51,8 @@ and `symmetric: bool`, stating if the data will be used for symmetric or asymmetric
 allow missing values for unused fields; see the [API reference](../api_reference/python-api-reference.md#enum) for more information. To validate update/batch data `update_data: UpdateData`,
 power-grid-model update data, should also be supplied.

-- `validate_input_data(input_data, calculation_type, symmetric) -> List[ValidationError]` validates input_data.
-- `validate_batch_data(input_data, update_data, calculation_type, symmetric) -> Dict[int, List[ValidationError]]` validates input_data in combination with batch/update data.
+- `validate_input_data(input_data, calculation_type, symmetric) -> list[ValidationError]` validates input_data.
+- `validate_batch_data(input_data, update_data, calculation_type, symmetric) -> dict[int, list[ValidationError]]` validates input_data in combination with batch/update data.

 ```{note}
 When doing [batch calculations](./calculations.md#batch-calculations), the input data set by itself is not required to be valid, as long as all missing and invalid values are overwritten in all scenarios in the batch data set.
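
A minimal end-to-end sketch of the validation functions documented above; the two-node values are made up for illustration:

```python
# A minimal end-to-end sketch (component values invented for illustration).
from power_grid_model import CalculationType, initialize_array
from power_grid_model.validation import validate_input_data

# build a tiny input dataset: two nodes rated at 10.5 kV
node = initialize_array("input", "node", 2)
node["id"] = [1, 2]
node["u_rated"] = [10.5e3, 10.5e3]

input_data = {"node": node}
errors = validate_input_data(input_data, CalculationType.power_flow, symmetric=True)
print(errors)  # None when the data is valid, otherwise a list[ValidationError]
```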
setup.py (15 changes: 7 additions & 8 deletions)

@@ -7,7 +7,6 @@
 import shutil
 from itertools import chain
 from pathlib import Path
-from typing import List

 # noinspection PyPackageRequirements
 from setuptools import Extension, setup
@@ -23,7 +22,7 @@
     raise SystemError("Only Windows, Linux, or MacOS is supported!")


-def get_required_dependency_include() -> List[str]:
+def get_required_dependency_include() -> list[str]:
     """
     Get build requirements includes.
@@ -39,7 +38,7 @@ def get_required_dependency_include() -> List[str]:
     return []


-def get_pre_installed_header_include() -> List[str]:
+def get_pre_installed_header_include() -> list[str]:
     """
     Get header files from pybuild_header_dependency, if it is installed
@@ -55,7 +54,7 @@ def get_pre_installed_header_include() -> List[str]:
     return []


-def get_conda_include() -> List[str]:
+def get_conda_include() -> list[str]:
     """
     Get conda include path, if we are inside conda environment
@@ -151,10 +150,10 @@ def generate_build_ext(pkg_dir: Path, pkg_name: str):
     include_dirs += get_pre_installed_header_include()
     include_dirs += get_conda_include()
     # compiler and link flag
-    cflags: List[str] = []
-    lflags: List[str] = []
-    library_dirs: List[str] = []
-    libraries: List[str] = []
+    cflags: list[str] = []
+    lflags: list[str] = []
+    library_dirs: list[str] = []
+    libraries: list[str] = []
     sources = [
         str(pgm_c / pgm_c / "src" / "handle.cpp"),
         str(pgm_c / pgm_c / "src" / "meta_data.cpp"),
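
The bodies of these include helpers are collapsed in this view. As a rough idea of their shape, a sketch of what `get_conda_include()` might look like; the `CONDA_PREFIX`-based detection below is an assumption for illustration, not the actual implementation:

```python
# A hedged sketch of a get_conda_include()-style helper; the actual body is
# not shown in this diff, so the CONDA_PREFIX detection is an assumption.
import os
from pathlib import Path


def get_conda_include() -> list[str]:
    """Get conda include path, if we are inside a conda environment."""
    conda_prefix = os.environ.get("CONDA_PREFIX")
    if conda_prefix is not None:
        include_dir = Path(conda_prefix) / "include"
        if include_dir.exists():
            return [str(include_dir)]
    return []
```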
src/power_grid_model/_utils.py (10 changes: 4 additions & 6 deletions)

@@ -11,7 +11,7 @@
 """

 from copy import deepcopy
-from typing import List, Optional, Union, cast
+from typing import Optional, cast

 import numpy as np

@@ -101,7 +101,7 @@ def get_and_verify_batch_sizes(batch_data: BatchDataset) -> int:
     """

     n_batch_size = 0
-    checked_components: List[ComponentType] = []
+    checked_components: list[ComponentType] = []
     for component, data in batch_data.items():
         n_component_batch_size = get_batch_size(data)
         if checked_components and n_component_batch_size != n_batch_size:
@@ -150,9 +150,7 @@ def get_batch_size(batch_data: BatchArray) -> int:
     return n_batches


-def split_numpy_array_in_batches(
-    data: Union[DenseBatchArray, SingleArray], component: ComponentType
-) -> List[np.ndarray]:
+def split_numpy_array_in_batches(data: DenseBatchArray | SingleArray, component: ComponentType) -> list[np.ndarray]:
     """
     Split a single dense numpy array into one or more batches
@@ -179,7 +177,7 @@
     )


-def split_sparse_batches_in_batches(batch_data: SparseBatchArray, component: ComponentType) -> List[np.ndarray]:
+def split_sparse_batches_in_batches(batch_data: SparseBatchArray, component: ComponentType) -> list[np.ndarray]:
     """
     Split a single numpy array representing, a compressed sparse structure, into one or more batches
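
A hedged sketch of the dense/sparse splitting these two functions perform (the real implementations carry extra validation and error reporting): a dense 2-D batch array splits row-wise into scenarios, while a sparse batch slices a flat buffer using an index pointer:

```python
# A hedged sketch of the splitting logic described by the docstrings above;
# not the actual implementations, which also validate their inputs.
import numpy as np


def split_dense(data: np.ndarray) -> list[np.ndarray]:
    if data.ndim == 1:  # a single scenario
        return [data]
    return [data[i, ...] for i in range(data.shape[0])]  # one row per scenario


def split_sparse(indptr: np.ndarray, flat_data: np.ndarray) -> list[np.ndarray]:
    # indptr delimits each scenario's slice of the flat data buffer
    return [flat_data[indptr[i] : indptr[i + 1]] for i in range(len(indptr) - 1)]
```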
src/power_grid_model/core/buffer_handling.py (16 changes: 8 additions & 8 deletions)

@@ -8,7 +8,7 @@


 from dataclasses import dataclass
-from typing import Dict, Mapping, Optional, Tuple, Union
+from typing import Mapping, Optional

 import numpy as np

@@ -148,12 +148,12 @@ def _get_sparse_buffer_properties(data: Mapping[str, np.ndarray]) -> BufferProperties:
     )


-def get_buffer_properties(data: Union[np.ndarray, Mapping[str, np.ndarray]]) -> BufferProperties:
+def get_buffer_properties(data: np.ndarray | Mapping[str, np.ndarray]) -> BufferProperties:
     """
     Extract the properties of the dataset component

     Args:
-        data (Union[np.ndarray, Mapping[str, np.ndarray]]): the dataset component.
+        data (np.ndarray | Mapping[str, np.ndarray]): the dataset component.

     Raises:
         ValueError: if the dataset component contains conflicting or bad data.
@@ -214,7 +214,7 @@ def _get_sparse_buffer_view(data: Mapping[str, np.ndarray], schema: ComponentMetaData)
     )


-def get_buffer_view(data: Union[np.ndarray, Mapping[str, np.ndarray]], schema: ComponentMetaData) -> CBuffer:
+def get_buffer_view(data: np.ndarray | Mapping[str, np.ndarray], schema: ComponentMetaData) -> CBuffer:
     """
     Get a C API compatible view on a buffer.
@@ -233,7 +233,7 @@ def get_buffer_view(data: Union[np.ndarray, Mapping[str, np.ndarray]], schema: C
 def create_buffer(
     properties: BufferProperties, schema: ComponentMetaData
-) -> Union[np.ndarray, Dict[ComponentType, np.ndarray]]:
+) -> np.ndarray | dict[ComponentType, np.ndarray]:
     """
     Create a buffer with the provided properties and type.
@@ -245,7 +245,7 @@ def create_buffer(
         ValueError: if the buffer properties are not consistent.

     Returns:
-        Union[np.ndarray, Dict[str, np.ndarray]]: a buffer with the correct properties.
+        np.ndarray | dict[str, np.ndarray]: a buffer with the correct properties.
     """
     if properties.is_sparse:
         return _create_sparse_buffer(properties=properties, schema=schema)
@@ -270,15 +270,15 @@ def _create_uniform_buffer(properties: BufferProperties, schema: ComponentMetaData)
     if properties.is_sparse:
         raise ValueError(f"A uniform buffer cannot be sparse. {VALIDATOR_MSG}")

-    shape: Union[int, Tuple[int, int]] = (
+    shape: int | tuple[int, int] = (
         (properties.batch_size, properties.n_elements_per_scenario)
         if properties.is_batch
         else properties.n_elements_per_scenario
     )
     return np.empty(shape=shape, dtype=schema.dtype)


-def _create_sparse_buffer(properties: BufferProperties, schema: ComponentMetaData) -> Dict[str, np.ndarray]:
+def _create_sparse_buffer(properties: BufferProperties, schema: ComponentMetaData) -> dict[str, np.ndarray]:
     """
     Create a sparse buffer with the provided properties and type.
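
The `shape` annotation above encodes the uniform-buffer layout choice: batch data gets a 2-D `(batch_size, n_elements_per_scenario)` buffer, single data a 1-D one. A small sketch of that selection, with a stand-in dtype instead of the schema's actual one:

```python
# A small sketch of the shape selection in _create_uniform_buffer();
# np.float64 is a stand-in for schema.dtype.
import numpy as np


def uniform_buffer_shape(is_batch: bool, batch_size: int, n_elements_per_scenario: int) -> int | tuple[int, int]:
    return (batch_size, n_elements_per_scenario) if is_batch else n_elements_per_scenario


print(np.empty(shape=uniform_buffer_shape(True, 3, 5), dtype=np.float64).shape)   # (3, 5)
print(np.empty(shape=uniform_buffer_shape(False, 3, 5), dtype=np.float64).shape)  # (5,)
```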
src/power_grid_model/core/data_handling.py (8 changes: 3 additions & 5 deletions)

@@ -8,7 +8,7 @@


 from enum import Enum
-from typing import Mapping, Tuple, Union
+from typing import Mapping

 import numpy as np

@@ -70,9 +70,7 @@ def prepare_input_view(input_data: Mapping[ComponentType, np.ndarray]) -> CConstDataset:
     return CConstDataset(input_data, dataset_type=DatasetType.input)


-def prepare_update_view(
-    update_data: Mapping[ComponentType, Union[np.ndarray, Mapping[str, np.ndarray]]]
-) -> CConstDataset:
+def prepare_update_view(update_data: Mapping[ComponentType, np.ndarray | Mapping[str, np.ndarray]]) -> CConstDataset:
     """
     Create a view of the update data, or an empty view if not provided, in a format compatible with the PGM core libary.
@@ -139,7 +137,7 @@ def create_output_data(
     for name, count in all_component_count.items():
         # shape
         if is_batch:
-            shape: Union[Tuple[int], Tuple[int, int]] = (batch_size, count)
+            shape: tuple[int] | tuple[int, int] = (batch_size, count)
         else:
             shape = (count,)
         result_dict[name] = initialize_array(output_type.value, name, shape=shape, empty=True)
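
The new `prepare_update_view` signature spells out the two accepted per-component forms: a dense batch array, or a sparse mapping of numpy arrays. A sketch of both, with invented sizes; the `"indptr"`/`"data"` keys follow the power-grid-model sparse batch convention:

```python
# A sketch of the two update-data forms accepted per component; sizes are
# invented for illustration.
import numpy as np
from power_grid_model import initialize_array

# dense: one row per scenario, one column per object
dense_update = initialize_array("update", "sym_load", (3, 2))

# sparse: a flat data buffer plus an indptr delimiting each scenario
flat = initialize_array("update", "sym_load", 4)
sparse_update = {"indptr": np.array([0, 1, 1, 4]), "data": flat}

update_data = {"sym_load": dense_update}  # or {"sym_load": sparse_update}
```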
src/power_grid_model/core/dataset_definitions.py (6 changes: 3 additions & 3 deletions)

@@ -7,7 +7,7 @@
 # This file is automatically generated. DO NOT modify it manually!

 from enum import Enum, EnumMeta
-from typing import Any, Dict, Mapping
+from typing import Any, Mapping

 # pylint: disable=invalid-name

@@ -83,7 +83,7 @@ def _str_to_datatype(data_type: Any) -> DatasetType:
     return data_type


-def _map_to_datatypes(data: Mapping[Any, Any]) -> Dict[DatasetType, Any]:
+def _map_to_datatypes(data: Mapping[Any, Any]) -> dict[DatasetType, Any]:
     """Helper function to map datatype str keys to DatasetType."""
     return {_str_to_datatype(key): value for key, value in data.items()}

@@ -95,7 +95,7 @@ def _str_to_component_type(component: Any) -> ComponentType:
     return component


-def _map_to_component_types(data: Mapping[Any, Any]) -> Dict[ComponentType, Any]:
+def _map_to_component_types(data: Mapping[Any, Any]) -> dict[ComponentType, Any]:
     """Helper function to map componenttype str keys to ComponentType."""
     return {_str_to_component_type(key): value for key, value in data.items()}
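
A hedged sketch of the string-to-enum coercion these generated helpers perform; `ComponentType` below is a stand-in for the generated enum, which has many more members:

```python
# A hedged sketch of the coercion helpers above; this ComponentType is a
# two-member stand-in for the generated enum.
from enum import Enum
from typing import Any, Mapping


class ComponentType(str, Enum):
    node = "node"
    line = "line"


def _str_to_component_type(component: Any) -> ComponentType:
    if isinstance(component, str) and not isinstance(component, ComponentType):
        return ComponentType[component]  # look up the member by name
    return component


def _map_to_component_types(data: Mapping[Any, Any]) -> dict[ComponentType, Any]:
    return {_str_to_component_type(key): value for key, value in data.items()}


# plain str keys and enum keys both normalize to enum keys
print(_map_to_component_types({"node": 2, ComponentType.line: 1}))
```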