diff --git a/src/power_grid_model/validation/rules.py b/src/power_grid_model/validation/rules.py index 7046fb9f6..b506fe1ad 100644 --- a/src/power_grid_model/validation/rules.py +++ b/src/power_grid_model/validation/rules.py @@ -70,12 +70,12 @@ ValidationError, ) from power_grid_model.validation.utils import ( - eval_expression, - get_indexer, - get_mask, - get_valid_ids, - nan_type, - set_default_value, + _eval_expression, + _get_indexer, + _get_mask, + _get_valid_ids, + _nan_type, + _set_default_value, ) Error = TypeVar("Error", bound=ValidationError) @@ -370,17 +370,17 @@ def none_match_comparison( # pylint: disable=too-many-arguments where the value in the field of interest matched the comparison. """ if default_value_1 is not None: - set_default_value(data=data, component=component, field=field, default_value=default_value_1) + _set_default_value(data=data, component=component, field=field, default_value=default_value_1) if default_value_2 is not None: - set_default_value(data=data, component=component, field=field, default_value=default_value_2) + _set_default_value(data=data, component=component, field=field, default_value=default_value_2) component_data = data[component] if not isinstance(component_data, np.ndarray): raise NotImplementedError() # TODO(mgovers): add support for columnar data if isinstance(ref_value, tuple): - ref = tuple(eval_expression(component_data, v) for v in ref_value) + ref = tuple(_eval_expression(component_data, v) for v in ref_value) else: - ref = (eval_expression(component_data, ref_value),) + ref = (_eval_expression(component_data, ref_value),) matches = compare_fn(component_data[field], *ref) if matches.any(): if matches.ndim > 1: @@ -520,7 +520,7 @@ def all_valid_enum_values( """ enums: list[Type[Enum]] = enum if isinstance(enum, list) else [enum] - valid = {nan_type(component, field)} + valid = {_nan_type(component, field)} for enum_type in enums: valid.update(list(enum_type)) @@ -557,13 +557,13 @@ def all_valid_associated_enum_values( # pylint: disable=too-many-positional-arg """ enums: list[Type[Enum]] = enum if isinstance(enum, list) else [enum] - valid_ids = get_valid_ids(data=data, ref_components=ref_components) + valid_ids = _get_valid_ids(data=data, ref_components=ref_components) mask = np.logical_and( - get_mask(data=data, component=component, field=field, **filters), + _get_mask(data=data, component=component, field=field, **filters), np.isin(data[component][ref_object_id_field], valid_ids), ) - valid = {nan_type(component, field)} + valid = {_nan_type(component, field)} for enum_type in enums: valid.update(list(enum_type)) @@ -596,8 +596,8 @@ def all_valid_ids( A list containing zero or one InvalidIdError, listing all ids where the value in the field of interest was not a valid object identifier. 
""" - valid_ids = get_valid_ids(data=data, ref_components=ref_components) - mask = get_mask(data=data, component=component, field=field, **filters) + valid_ids = _get_valid_ids(data=data, ref_components=ref_components) + mask = _get_mask(data=data, component=component, field=field, **filters) # Find any values that can't be found in the set of ids invalid = np.logical_and(mask, np.isin(data[component][field], valid_ids, invert=True)) @@ -779,7 +779,7 @@ def none_missing( for field in fields: if isinstance(field, list): field = field[0] - nan = nan_type(component, field) + nan = _nan_type(component, field) if np.isnan(nan): invalid = np.isnan(data[component][field][index]) else: @@ -939,14 +939,14 @@ def all_supported_tap_control_side( # pylint: disable=too-many-arguments A list containing zero or more InvalidAssociatedEnumValueErrors; listing all the ids of components where the field of interest was invalid, given the referenced object's field. """ - mask = get_mask(data=data, component=component, field=control_side_field, **filters) + mask = _get_mask(data=data, component=component, field=control_side_field, **filters) values = data[component][control_side_field][mask] invalid = np.zeros_like(mask) for ref_component, ref_field in tap_side_fields: if ref_component in data: - indices = get_indexer(data[ref_component]["id"], data[component][regulated_object_field], default_value=-1) + indices = _get_indexer(data[ref_component]["id"], data[component][regulated_object_field], default_value=-1) found = indices != -1 ref_comp_values = data[ref_component][ref_field][indices[found]] invalid[found] = np.logical_or(invalid[found], values[found] == ref_comp_values) diff --git a/src/power_grid_model/validation/utils.py b/src/power_grid_model/validation/utils.py index bc23e99e1..2d2df85b8 100644 --- a/src/power_grid_model/validation/utils.py +++ b/src/power_grid_model/validation/utils.py @@ -22,7 +22,7 @@ from power_grid_model.validation.errors import ValidationError -def eval_expression(data: np.ndarray, expression: int | float | str) -> np.ndarray: +def _eval_expression(data: np.ndarray, expression: int | float | str) -> np.ndarray: """ Wrapper function that checks the type of the 'expression'. If the expression is a string, it is assumed to be a field expression and the expression is validated. Otherwise it is assumed to be a numerical value and the value @@ -42,11 +42,11 @@ def eval_expression(data: np.ndarray, expression: int | float | str) -> np.ndarr """ if isinstance(expression, str): - return eval_field_expression(data, expression) + return _eval_field_expression(data, expression) return np.array(expression) -def eval_field_expression(data: np.ndarray, expression: str) -> np.ndarray: +def _eval_field_expression(data: np.ndarray, expression: str) -> np.ndarray: """ A field expression can either be the name of a field (e.g. 'field_x') in the data, or a ratio between two fields (e.g. 'field_x / field_y'). The expression is checked on validity and then the fields are checked to be present in @@ -92,18 +92,18 @@ def eval_field_expression(data: np.ndarray, expression: str) -> np.ndarray: return np.true_divide(data[fields[0]], data[fields[1]]) -def update_input_data(input_data: SingleDataset, update_data: SingleDataset): +def _update_input_data(input_data: SingleDataset, update_data: SingleDataset): """ Update the input data using the available non-nan values in the update data. 
""" merged_data = {component: array.copy() for component, array in input_data.items()} for component in update_data.keys(): - update_component_data(component, merged_data[component], update_data[component]) + _update_component_data(component, merged_data[component], update_data[component]) return merged_data -def update_component_data( +def _update_component_data( component: ComponentTypeLike, input_data: SingleComponentData, update_data: SingleComponentData ) -> None: """ @@ -133,7 +133,7 @@ def _update_component_array_data( for field in update_data.dtype.names: if field == "id": continue - nan = nan_type(component, field, DatasetType.update) + nan = _nan_type(component, field, DatasetType.update) if np.isnan(nan): mask = ~np.isnan(update_data[field]) else: @@ -143,12 +143,12 @@ def _update_component_array_data( for phase in range(mask.shape[1]): # find indexers of to-be-updated object sub_mask = mask[:, phase] - idx = get_indexer(input_data["id"], update_data_ids[sub_mask]) + idx = _get_indexer(input_data["id"], update_data_ids[sub_mask]) # update input_data[field][idx, phase] = update_data[field][sub_mask, phase] else: # find indexers of to-be-updated object - idx = get_indexer(input_data["id"], update_data_ids[mask]) + idx = _get_indexer(input_data["id"], update_data_ids[mask]) # update input_data[field][idx] = update_data[field][mask] @@ -190,7 +190,7 @@ def errors_to_string( return msg -def nan_type(component: ComponentTypeLike, field: str, data_type: DatasetType = DatasetType.input): +def _nan_type(component: ComponentTypeLike, field: str, data_type: DatasetType = DatasetType.input): """ Helper function to retrieve the nan value for a certain field as defined in the power_grid_meta_data. """ @@ -198,7 +198,7 @@ def nan_type(component: ComponentTypeLike, field: str, data_type: DatasetType = return power_grid_meta_data[data_type][component].nans[field] -def get_indexer(source: np.ndarray, target: np.ndarray, default_value: Optional[int] = None) -> np.ndarray: +def _get_indexer(source: np.ndarray, target: np.ndarray, default_value: Optional[int] = None) -> np.ndarray: """ Given array of values from a source and a target dataset. Find the position of each value in the target dataset in the context of the source dataset. 
@@ -209,7 +209,7 @@ def get_indexer(source: np.ndarray, target: np.ndarray, default_value: Optional[ >>> input_ids = [1, 2, 3, 4, 5] >>> update_ids = [3] - >>> assert get_indexer(input_ids, update_ids) == np.array([2]) + >>> assert _get_indexer(input_ids, update_ids) == np.array([2]) Args: source: array of values in the source dataset @@ -238,7 +238,7 @@ def get_indexer(source: np.ndarray, target: np.ndarray, default_value: Optional[ return np.where(source[clipped_indices] == target, permutation_sort[clipped_indices], default_value) -def set_default_value( +def _set_default_value( data: SingleDataset, component: ComponentTypeLike, field: str, default_value: int | float | np.ndarray ): """ @@ -256,17 +256,17 @@ def set_default_value( Returns: """ - if np.isnan(nan_type(component, field)): + if np.isnan(_nan_type(component, field)): mask = np.isnan(data[component][field]) else: - mask = data[component][field] == nan_type(component, field) + mask = data[component][field] == _nan_type(component, field) if isinstance(default_value, np.ndarray): data[component][field][mask] = default_value[mask] else: data[component][field][mask] = default_value -def get_valid_ids(data: SingleDataset, ref_components: ComponentTypeLike | list[ComponentTypeVar]) -> list[int]: +def _get_valid_ids(data: SingleDataset, ref_components: ComponentTypeLike | list[ComponentTypeVar]) -> list[int]: """ This function returns the valid IDs specified by all ref_components @@ -286,7 +286,7 @@ def get_valid_ids(data: SingleDataset, ref_components: ComponentTypeLike | list[ valid_ids = set() for ref_component in ref_components: if ref_component in data: - nan = nan_type(ref_component, "id") + nan = _nan_type(ref_component, "id") if np.isnan(nan): mask = ~np.isnan(data[ref_component]["id"]) else: @@ -296,7 +296,7 @@ def get_valid_ids(data: SingleDataset, ref_components: ComponentTypeLike | list[ return list(valid_ids) -def get_mask(data: SingleDataset, component: ComponentTypeLike, field: str, **filters: Any) -> np.ndarray: +def _get_mask(data: SingleDataset, component: ComponentTypeLike, field: str, **filters: Any) -> np.ndarray: """ Get a mask based on the specified filters. E.g. measured_terminal_type=MeasuredTerminalType.source. 
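[Reviewer note, not part of the patch] The utils.py hunks above only change visibility: get_indexer becomes _get_indexer but keeps its argsort/searchsorted/clip lookup. Below is a minimal, self-contained sketch of that pattern for reference; the name indexer_sketch is hypothetical, and the asserts mirror the docstring example in the hunk.

    import numpy as np

    def indexer_sketch(source, target, default_value=-1):
        # Position of each target id within source; default_value where absent.
        source = np.asarray(source)
        target = np.asarray(target)
        permutation_sort = np.argsort(source)  # ordering that sorts the source ids
        indices = np.searchsorted(source[permutation_sort], target)
        clipped = np.clip(indices, 0, len(source) - 1)  # keep lookups in bounds
        found = source[permutation_sort][clipped] == target  # exact matches only
        return np.where(found, permutation_sort[clipped], default_value)

    # Mirrors the docstring example: ids [1, 2, 3, 4, 5], lookup [3] -> index 2.
    assert indexer_sketch([1, 2, 3, 4, 5], [3]).tolist() == [2]
    # Ids missing from the source resolve to default_value, the behaviour
    # all_supported_tap_control_side relies on when it passes default_value=-1.
    assert indexer_sketch([1, 2, 3, 4, 5], [3, 99]).tolist() == [2, -1]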
diff --git a/src/power_grid_model/validation/validation.py b/src/power_grid_model/validation/validation.py index 5e76af9a8..10dba4a2e 100644 --- a/src/power_grid_model/validation/validation.py +++ b/src/power_grid_model/validation/validation.py @@ -19,7 +19,10 @@ import numpy as np from power_grid_model import ComponentType, DatasetType, power_grid_meta_data -from power_grid_model._utils import compatibility_convert_row_columnar_dataset, convert_batch_dataset_to_batch_list +from power_grid_model._utils import ( + compatibility_convert_row_columnar_dataset as _compatibility_convert_row_columnar_dataset, + convert_batch_dataset_to_batch_list as _convert_batch_dataset_to_batch_list, +) from power_grid_model.data_types import BatchDataset, Dataset, SingleDataset from power_grid_model.enum import ( Branch3Side, @@ -39,30 +42,30 @@ ValidationError, ) from power_grid_model.validation.rules import ( - all_between, - all_between_or_at, - all_boolean, - all_cross_unique, - all_enabled_identical, - all_finite, - all_greater_or_equal, - all_greater_than_or_equal_to_zero, - all_greater_than_zero, - all_less_than, - all_not_two_values_equal, - all_not_two_values_zero, - all_supported_tap_control_side, - all_unique, - all_valid_associated_enum_values, - all_valid_clocks, - all_valid_enum_values, - all_valid_fault_phases, - all_valid_ids, - ids_valid_in_update_data_set, - none_missing, - valid_p_q_sigma, + all_between as _all_between, + all_between_or_at as _all_between_or_at, + all_boolean as _all_boolean, + all_cross_unique as _all_cross_unique, + all_enabled_identical as _all_enabled_identical, + all_finite as _all_finite, + all_greater_or_equal as _all_greater_or_equal, + all_greater_than_or_equal_to_zero as _all_greater_than_or_equal_to_zero, + all_greater_than_zero as _all_greater_than_zero, + all_less_than as _all_less_than, + all_not_two_values_equal as _all_not_two_values_equal, + all_not_two_values_zero as _all_not_two_values_zero, + all_supported_tap_control_side as _all_supported_tap_control_side, + all_unique as _all_unique, + all_valid_associated_enum_values as _all_valid_associated_enum_values, + all_valid_clocks as _all_valid_clocks, + all_valid_enum_values as _all_valid_enum_values, + all_valid_fault_phases as _all_valid_fault_phases, + all_valid_ids as _all_valid_ids, + ids_valid_in_update_data_set as _ids_valid_in_update_data_set, + none_missing as _none_missing, + valid_p_q_sigma as _valid_p_q_sigma, ) -from power_grid_model.validation.utils import update_input_data +from power_grid_model.validation.utils import _update_input_data def validate_input_data( @@ -88,7 +91,7 @@ def validate_input_data( Error: KeyError | TypeError | ValueError: if the data structure is invalid. """ # Convert to row based if in columnar or mixed format format - row_input_data = compatibility_convert_row_columnar_dataset(input_data, None, DatasetType.input) + row_input_data = _compatibility_convert_row_columnar_dataset(input_data, None, DatasetType.input) # A deep copy is made of the input data, since default values will be added in the validation process input_data_copy = copy.deepcopy(row_input_data) @@ -136,7 +139,7 @@ def validate_batch_data( Error: KeyError | TypeError | ValueError: if the data structure is invalid. 
""" # Convert to row based if in columnar or mixed format - row_input_data = compatibility_convert_row_columnar_dataset(input_data, None, DatasetType.input) + row_input_data = _compatibility_convert_row_columnar_dataset(input_data, None, DatasetType.input) # A deep copy is made of the input data, since default values will be added in the validation process input_data_copy = copy.deepcopy(row_input_data) @@ -144,11 +147,11 @@ def validate_batch_data( input_errors: list[ValidationError] = list(validate_unique_ids_across_components(input_data_copy)) - batch_data = convert_batch_dataset_to_batch_list(update_data, DatasetType.update) + batch_data = _convert_batch_dataset_to_batch_list(update_data, DatasetType.update) errors = {} for batch, batch_update_data in enumerate(batch_data): - row_update_data = compatibility_convert_row_columnar_dataset(batch_update_data, None, DatasetType.update) + row_update_data = _compatibility_convert_row_columnar_dataset(batch_update_data, None, DatasetType.update) assert_valid_data_structure(row_update_data, DatasetType.update) id_errors: list[IdNotInDatasetError | InvalidIdError] = validate_ids(row_update_data, input_data_copy) @@ -156,7 +159,7 @@ def validate_batch_data( if not id_errors: batch_errors = input_errors - merged_data = update_input_data(input_data_copy, row_update_data) + merged_data = _update_input_data(input_data_copy, row_update_data) batch_errors += validate_required_values(merged_data, calculation_type, symmetric) batch_errors += validate_values(merged_data, calculation_type) @@ -216,7 +219,7 @@ def validate_unique_ids_across_components(data: SingleDataset) -> list[MultiComp An empty list if all ids are unique, or a list of MultiComponentNotUniqueErrors for all components that have non-unique ids """ - return all_cross_unique(data, [(component, "id") for component in data]) + return _all_cross_unique(data, [(component, "id") for component in data]) def validate_ids(update_data: SingleDataset, input_data: SingleDataset) -> list[IdNotInDatasetError | InvalidIdError]: @@ -237,7 +240,7 @@ def validate_ids(update_data: SingleDataset, input_data: SingleDataset) -> list[ """ errors = ( - ids_valid_in_update_data_set(update_data, input_data, component, "update_data") for component in update_data + _ids_valid_in_update_data_set(update_data, input_data, component, "update_data") for component in update_data ) return list(chain(*errors)) @@ -433,7 +436,7 @@ def is_nested_list(items): def process_nested_items(component, items, data, results): for index, item in enumerate(sublist for sublist in items): if index < len(data[component]): - results.append(none_missing(data, component, item, index)) + results.append(_none_missing(data, component, item, index)) results = [] @@ -443,7 +446,7 @@ def process_nested_items(component, items, data, results): if is_nested_list(items): process_nested_items(component, items, data, results) else: - results.append(none_missing(data, component, items, 0)) + results.append(_none_missing(data, component, items, 0)) return list(chain(*results)) @@ -461,7 +464,7 @@ def validate_values(data: SingleDataset, calculation_type: Optional[CalculationT """ errors: list[ValidationError] = list( - all_finite( + _all_finite( data, { ComponentType.sym_power_sensor: ["power_sigma"], @@ -514,74 +517,74 @@ def validate_values(data: SingleDataset, calculation_type: Optional[CalculationT def validate_base(data: SingleDataset, component: ComponentType) -> list[ValidationError]: - errors: list[ValidationError] = list(all_unique(data, component, 
"id")) + errors: list[ValidationError] = list(_all_unique(data, component, "id")) return errors def validate_node(data: SingleDataset) -> list[ValidationError]: errors = validate_base(data, ComponentType.node) - errors += all_greater_than_zero(data, ComponentType.node, "u_rated") + errors += _all_greater_than_zero(data, ComponentType.node, "u_rated") return errors def validate_branch(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_base(data, component) - errors += all_valid_ids(data, component, "from_node", ComponentType.node) - errors += all_valid_ids(data, component, "to_node", ComponentType.node) - errors += all_not_two_values_equal(data, component, "to_node", "from_node") - errors += all_boolean(data, component, "from_status") - errors += all_boolean(data, component, "to_status") + errors += _all_valid_ids(data, component, "from_node", ComponentType.node) + errors += _all_valid_ids(data, component, "to_node", ComponentType.node) + errors += _all_not_two_values_equal(data, component, "to_node", "from_node") + errors += _all_boolean(data, component, "from_status") + errors += _all_boolean(data, component, "to_status") return errors def validate_line(data: SingleDataset) -> list[ValidationError]: errors = validate_branch(data, ComponentType.line) - errors += all_not_two_values_zero(data, ComponentType.line, "r1", "x1") - errors += all_not_two_values_zero(data, ComponentType.line, "r0", "x0") - errors += all_greater_than_zero(data, ComponentType.line, "i_n") + errors += _all_not_two_values_zero(data, ComponentType.line, "r1", "x1") + errors += _all_not_two_values_zero(data, ComponentType.line, "r0", "x0") + errors += _all_greater_than_zero(data, ComponentType.line, "i_n") return errors def validate_generic_branch(data: SingleDataset) -> list[ValidationError]: errors = validate_branch(data, ComponentType.generic_branch) - errors += all_greater_than_zero(data, ComponentType.generic_branch, "k") - errors += all_greater_than_or_equal_to_zero(data, ComponentType.generic_branch, "sn") + errors += _all_greater_than_zero(data, ComponentType.generic_branch, "k") + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.generic_branch, "sn") return errors def validate_transformer(data: SingleDataset) -> list[ValidationError]: errors = validate_branch(data, ComponentType.transformer) - errors += all_greater_than_zero(data, ComponentType.transformer, "u1") - errors += all_greater_than_zero(data, ComponentType.transformer, "u2") - errors += all_greater_than_zero(data, ComponentType.transformer, "sn") - errors += all_greater_or_equal(data, ComponentType.transformer, "uk", "pk/sn") - errors += all_between(data, ComponentType.transformer, "uk", 0, 1) - errors += all_greater_than_or_equal_to_zero(data, ComponentType.transformer, "pk") - errors += all_greater_or_equal(data, ComponentType.transformer, "i0", "p0/sn") - errors += all_less_than(data, ComponentType.transformer, "i0", 1) - errors += all_greater_than_or_equal_to_zero(data, ComponentType.transformer, "p0") - errors += all_valid_enum_values(data, ComponentType.transformer, "winding_from", WindingType) - errors += all_valid_enum_values(data, ComponentType.transformer, "winding_to", WindingType) - errors += all_between_or_at(data, ComponentType.transformer, "clock", 0, 12) - errors += all_valid_clocks(data, ComponentType.transformer, "clock", "winding_from", "winding_to") - errors += all_valid_enum_values(data, ComponentType.transformer, "tap_side", BranchSide) - errors += all_between_or_at( + errors 
+= _all_greater_than_zero(data, ComponentType.transformer, "u1") + errors += _all_greater_than_zero(data, ComponentType.transformer, "u2") + errors += _all_greater_than_zero(data, ComponentType.transformer, "sn") + errors += _all_greater_or_equal(data, ComponentType.transformer, "uk", "pk/sn") + errors += _all_between(data, ComponentType.transformer, "uk", 0, 1) + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.transformer, "pk") + errors += _all_greater_or_equal(data, ComponentType.transformer, "i0", "p0/sn") + errors += _all_less_than(data, ComponentType.transformer, "i0", 1) + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.transformer, "p0") + errors += _all_valid_enum_values(data, ComponentType.transformer, "winding_from", WindingType) + errors += _all_valid_enum_values(data, ComponentType.transformer, "winding_to", WindingType) + errors += _all_between_or_at(data, ComponentType.transformer, "clock", 0, 12) + errors += _all_valid_clocks(data, ComponentType.transformer, "clock", "winding_from", "winding_to") + errors += _all_valid_enum_values(data, ComponentType.transformer, "tap_side", BranchSide) + errors += _all_between_or_at( data, ComponentType.transformer, "tap_pos", "tap_min", "tap_max", data[ComponentType.transformer]["tap_nom"], 0 ) - errors += all_between_or_at(data, ComponentType.transformer, "tap_nom", "tap_min", "tap_max", 0) - errors += all_greater_than_or_equal_to_zero(data, ComponentType.transformer, "tap_size") - errors += all_greater_or_equal( + errors += _all_between_or_at(data, ComponentType.transformer, "tap_nom", "tap_min", "tap_max", 0) + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.transformer, "tap_size") + errors += _all_greater_or_equal( data, ComponentType.transformer, "uk_min", "pk_min/sn", data[ComponentType.transformer]["uk"] ) - errors += all_between(data, ComponentType.transformer, "uk_min", 0, 1, data[ComponentType.transformer]["uk"]) - errors += all_greater_or_equal( + errors += _all_between(data, ComponentType.transformer, "uk_min", 0, 1, data[ComponentType.transformer]["uk"]) + errors += _all_greater_or_equal( data, ComponentType.transformer, "uk_max", "pk_max/sn", data[ComponentType.transformer]["uk"] ) - errors += all_between(data, ComponentType.transformer, "uk_max", 0, 1, data[ComponentType.transformer]["uk"]) - errors += all_greater_than_or_equal_to_zero( + errors += _all_between(data, ComponentType.transformer, "uk_max", 0, 1, data[ComponentType.transformer]["uk"]) + errors += _all_greater_than_or_equal_to_zero( data, ComponentType.transformer, "pk_min", data[ComponentType.transformer]["pk"] ) - errors += all_greater_than_or_equal_to_zero( + errors += _all_greater_than_or_equal_to_zero( data, ComponentType.transformer, "pk_max", data[ComponentType.transformer]["pk"] ) return errors @@ -589,51 +592,51 @@ def validate_transformer(data: SingleDataset) -> list[ValidationError]: def validate_branch3(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_base(data, component) - errors += all_valid_ids(data, component, "node_1", ComponentType.node) - errors += all_valid_ids(data, component, "node_2", ComponentType.node) - errors += all_valid_ids(data, component, "node_3", ComponentType.node) - errors += all_not_two_values_equal(data, component, "node_1", "node_2") - errors += all_not_two_values_equal(data, component, "node_1", "node_3") - errors += all_not_two_values_equal(data, component, "node_2", "node_3") - errors += all_boolean(data, component, "status_1") 
- errors += all_boolean(data, component, "status_2") - errors += all_boolean(data, component, "status_3") + errors += _all_valid_ids(data, component, "node_1", ComponentType.node) + errors += _all_valid_ids(data, component, "node_2", ComponentType.node) + errors += _all_valid_ids(data, component, "node_3", ComponentType.node) + errors += _all_not_two_values_equal(data, component, "node_1", "node_2") + errors += _all_not_two_values_equal(data, component, "node_1", "node_3") + errors += _all_not_two_values_equal(data, component, "node_2", "node_3") + errors += _all_boolean(data, component, "status_1") + errors += _all_boolean(data, component, "status_2") + errors += _all_boolean(data, component, "status_3") return errors # pylint: disable=R0915 def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationError]: errors = validate_branch3(data, ComponentType.three_winding_transformer) - errors += all_greater_than_zero(data, ComponentType.three_winding_transformer, "u1") - errors += all_greater_than_zero(data, ComponentType.three_winding_transformer, "u2") - errors += all_greater_than_zero(data, ComponentType.three_winding_transformer, "u3") - errors += all_greater_than_zero(data, ComponentType.three_winding_transformer, "sn_1") - errors += all_greater_than_zero(data, ComponentType.three_winding_transformer, "sn_2") - errors += all_greater_than_zero(data, ComponentType.three_winding_transformer, "sn_3") - errors += all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_12", "pk_12/sn_1") - errors += all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_12", "pk_12/sn_2") - errors += all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_13", "pk_13/sn_1") - errors += all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_13", "pk_13/sn_3") - errors += all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_23", "pk_23/sn_2") - errors += all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_23", "pk_23/sn_3") - errors += all_between(data, ComponentType.three_winding_transformer, "uk_12", 0, 1) - errors += all_between(data, ComponentType.three_winding_transformer, "uk_13", 0, 1) - errors += all_between(data, ComponentType.three_winding_transformer, "uk_23", 0, 1) - errors += all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "pk_12") - errors += all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "pk_13") - errors += all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "pk_23") - errors += all_greater_or_equal(data, ComponentType.three_winding_transformer, "i0", "p0/sn_1") - errors += all_less_than(data, ComponentType.three_winding_transformer, "i0", 1) - errors += all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "p0") - errors += all_valid_enum_values(data, ComponentType.three_winding_transformer, "winding_1", WindingType) - errors += all_valid_enum_values(data, ComponentType.three_winding_transformer, "winding_2", WindingType) - errors += all_valid_enum_values(data, ComponentType.three_winding_transformer, "winding_3", WindingType) - errors += all_between_or_at(data, ComponentType.three_winding_transformer, "clock_12", 0, 12) - errors += all_between_or_at(data, ComponentType.three_winding_transformer, "clock_13", 0, 12) - errors += all_valid_clocks(data, ComponentType.three_winding_transformer, "clock_12", "winding_1", "winding_2") - errors += 
all_valid_clocks(data, ComponentType.three_winding_transformer, "clock_13", "winding_1", "winding_3") - errors += all_valid_enum_values(data, ComponentType.three_winding_transformer, "tap_side", Branch3Side) - errors += all_between_or_at( + errors += _all_greater_than_zero(data, ComponentType.three_winding_transformer, "u1") + errors += _all_greater_than_zero(data, ComponentType.three_winding_transformer, "u2") + errors += _all_greater_than_zero(data, ComponentType.three_winding_transformer, "u3") + errors += _all_greater_than_zero(data, ComponentType.three_winding_transformer, "sn_1") + errors += _all_greater_than_zero(data, ComponentType.three_winding_transformer, "sn_2") + errors += _all_greater_than_zero(data, ComponentType.three_winding_transformer, "sn_3") + errors += _all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_12", "pk_12/sn_1") + errors += _all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_12", "pk_12/sn_2") + errors += _all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_13", "pk_13/sn_1") + errors += _all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_13", "pk_13/sn_3") + errors += _all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_23", "pk_23/sn_2") + errors += _all_greater_or_equal(data, ComponentType.three_winding_transformer, "uk_23", "pk_23/sn_3") + errors += _all_between(data, ComponentType.three_winding_transformer, "uk_12", 0, 1) + errors += _all_between(data, ComponentType.three_winding_transformer, "uk_13", 0, 1) + errors += _all_between(data, ComponentType.three_winding_transformer, "uk_23", 0, 1) + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "pk_12") + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "pk_13") + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "pk_23") + errors += _all_greater_or_equal(data, ComponentType.three_winding_transformer, "i0", "p0/sn_1") + errors += _all_less_than(data, ComponentType.three_winding_transformer, "i0", 1) + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "p0") + errors += _all_valid_enum_values(data, ComponentType.three_winding_transformer, "winding_1", WindingType) + errors += _all_valid_enum_values(data, ComponentType.three_winding_transformer, "winding_2", WindingType) + errors += _all_valid_enum_values(data, ComponentType.three_winding_transformer, "winding_3", WindingType) + errors += _all_between_or_at(data, ComponentType.three_winding_transformer, "clock_12", 0, 12) + errors += _all_between_or_at(data, ComponentType.three_winding_transformer, "clock_13", 0, 12) + errors += _all_valid_clocks(data, ComponentType.three_winding_transformer, "clock_12", "winding_1", "winding_2") + errors += _all_valid_clocks(data, ComponentType.three_winding_transformer, "clock_13", "winding_1", "winding_3") + errors += _all_valid_enum_values(data, ComponentType.three_winding_transformer, "tap_side", Branch3Side) + errors += _all_between_or_at( data, ComponentType.three_winding_transformer, "tap_pos", @@ -642,51 +645,51 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr data[ComponentType.three_winding_transformer]["tap_nom"], 0, ) - errors += all_between_or_at(data, ComponentType.three_winding_transformer, "tap_nom", "tap_min", "tap_max", 0) - errors += all_greater_than_or_equal_to_zero(data, 
ComponentType.three_winding_transformer, "tap_size") - errors += all_greater_or_equal( + errors += _all_between_or_at(data, ComponentType.three_winding_transformer, "tap_nom", "tap_min", "tap_max", 0) + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.three_winding_transformer, "tap_size") + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_12_min", "pk_12_min/sn_1", data[ComponentType.three_winding_transformer]["uk_12"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_12_min", "pk_12_min/sn_2", data[ComponentType.three_winding_transformer]["uk_12"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_13_min", "pk_13_min/sn_1", data[ComponentType.three_winding_transformer]["uk_13"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_13_min", "pk_13_min/sn_3", data[ComponentType.three_winding_transformer]["uk_13"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_23_min", "pk_23_min/sn_2", data[ComponentType.three_winding_transformer]["uk_23"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_23_min", "pk_23_min/sn_3", data[ComponentType.three_winding_transformer]["uk_23"], ) - errors += all_between( + errors += _all_between( data, ComponentType.three_winding_transformer, "uk_12_min", @@ -694,7 +697,7 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr 1, data[ComponentType.three_winding_transformer]["uk_12"], ) - errors += all_between( + errors += _all_between( data, ComponentType.three_winding_transformer, "uk_13_min", @@ -702,7 +705,7 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr 1, data[ComponentType.three_winding_transformer]["uk_13"], ) - errors += all_between( + errors += _all_between( data, ComponentType.three_winding_transformer, "uk_23_min", @@ -710,49 +713,49 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr 1, data[ComponentType.three_winding_transformer]["uk_23"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_12_max", "pk_12_max/sn_1", data[ComponentType.three_winding_transformer]["uk_12"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_12_max", "pk_12_max/sn_2", data[ComponentType.three_winding_transformer]["uk_12"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_13_max", "pk_13_max/sn_1", data[ComponentType.three_winding_transformer]["uk_13"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_13_max", "pk_13_max/sn_3", data[ComponentType.three_winding_transformer]["uk_13"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_23_max", "pk_23_max/sn_2", data[ComponentType.three_winding_transformer]["uk_23"], ) - errors += all_greater_or_equal( + errors += _all_greater_or_equal( data, ComponentType.three_winding_transformer, "uk_23_max", "pk_23_max/sn_3", data[ComponentType.three_winding_transformer]["uk_23"], ) 
- errors += all_between( + errors += _all_between( data, ComponentType.three_winding_transformer, "uk_12_max", @@ -760,7 +763,7 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr 1, data[ComponentType.three_winding_transformer]["uk_12"], ) - errors += all_between( + errors += _all_between( data, ComponentType.three_winding_transformer, "uk_13_max", @@ -768,7 +771,7 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr 1, data[ComponentType.three_winding_transformer]["uk_13"], ) - errors += all_between( + errors += _all_between( data, ComponentType.three_winding_transformer, "uk_23_max", @@ -776,37 +779,37 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr 1, data[ComponentType.three_winding_transformer]["uk_23"], ) - errors += all_greater_than_or_equal_to_zero( + errors += _all_greater_than_or_equal_to_zero( data, ComponentType.three_winding_transformer, "pk_12_min", data[ComponentType.three_winding_transformer]["pk_12"], ) - errors += all_greater_than_or_equal_to_zero( + errors += _all_greater_than_or_equal_to_zero( data, ComponentType.three_winding_transformer, "pk_13_min", data[ComponentType.three_winding_transformer]["pk_13"], ) - errors += all_greater_than_or_equal_to_zero( + errors += _all_greater_than_or_equal_to_zero( data, ComponentType.three_winding_transformer, "pk_23_min", data[ComponentType.three_winding_transformer]["pk_23"], ) - errors += all_greater_than_or_equal_to_zero( + errors += _all_greater_than_or_equal_to_zero( data, ComponentType.three_winding_transformer, "pk_12_max", data[ComponentType.three_winding_transformer]["pk_12"], ) - errors += all_greater_than_or_equal_to_zero( + errors += _all_greater_than_or_equal_to_zero( data, ComponentType.three_winding_transformer, "pk_13_max", data[ComponentType.three_winding_transformer]["pk_13"], ) - errors += all_greater_than_or_equal_to_zero( + errors += _all_greater_than_or_equal_to_zero( data, ComponentType.three_winding_transformer, "pk_23_max", @@ -817,23 +820,23 @@ def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationEr def validate_appliance(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_base(data, component) - errors += all_boolean(data, component, "status") - errors += all_valid_ids(data, component, "node", ComponentType.node) + errors += _all_boolean(data, component, "status") + errors += _all_valid_ids(data, component, "node", ComponentType.node) return errors def validate_source(data: SingleDataset) -> list[ValidationError]: errors = validate_appliance(data, ComponentType.source) - errors += all_greater_than_zero(data, ComponentType.source, "u_ref") - errors += all_greater_than_zero(data, ComponentType.source, "sk") - errors += all_greater_than_or_equal_to_zero(data, ComponentType.source, "rx_ratio") - errors += all_greater_than_zero(data, ComponentType.source, "z01_ratio") + errors += _all_greater_than_zero(data, ComponentType.source, "u_ref") + errors += _all_greater_than_zero(data, ComponentType.source, "sk") + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.source, "rx_ratio") + errors += _all_greater_than_zero(data, ComponentType.source, "z01_ratio") return errors def validate_generic_load_gen(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_appliance(data, component) - errors += all_valid_enum_values(data, component, "type", LoadGenType) + errors += _all_valid_enum_values(data, component, 
"type", LoadGenType) return errors @@ -844,17 +847,17 @@ def validate_shunt(data: SingleDataset) -> list[ValidationError]: def validate_generic_voltage_sensor(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_base(data, component) - errors += all_greater_than_zero(data, component, "u_sigma") - errors += all_greater_than_zero(data, component, "u_measured") - errors += all_valid_ids(data, component, "measured_object", ComponentType.node) + errors += _all_greater_than_zero(data, component, "u_sigma") + errors += _all_greater_than_zero(data, component, "u_measured") + errors += _all_valid_ids(data, component, "measured_object", ComponentType.node) return errors def validate_generic_power_sensor(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_base(data, component) - errors += all_greater_than_zero(data, component, "power_sigma") - errors += all_valid_enum_values(data, component, "measured_terminal_type", MeasuredTerminalType) - errors += all_valid_ids( + errors += _all_greater_than_zero(data, component, "power_sigma") + errors += _all_valid_enum_values(data, component, "measured_terminal_type", MeasuredTerminalType) + errors += _all_valid_ids( data, component, field="measured_object", @@ -872,70 +875,70 @@ def validate_generic_power_sensor(data: SingleDataset, component: ComponentType) ComponentType.asym_gen, ], ) - errors += all_valid_ids( + errors += _all_valid_ids( data, component, field="measured_object", ref_components=[ComponentType.line, ComponentType.generic_branch, ComponentType.transformer], measured_terminal_type=MeasuredTerminalType.branch_from, ) - errors += all_valid_ids( + errors += _all_valid_ids( data, component, field="measured_object", ref_components=[ComponentType.line, ComponentType.generic_branch, ComponentType.transformer], measured_terminal_type=MeasuredTerminalType.branch_to, ) - errors += all_valid_ids( + errors += _all_valid_ids( data, component, field="measured_object", ref_components=ComponentType.source, measured_terminal_type=MeasuredTerminalType.source, ) - errors += all_valid_ids( + errors += _all_valid_ids( data, component, field="measured_object", ref_components=ComponentType.shunt, measured_terminal_type=MeasuredTerminalType.shunt, ) - errors += all_valid_ids( + errors += _all_valid_ids( data, component, field="measured_object", ref_components=[ComponentType.sym_load, ComponentType.asym_load], measured_terminal_type=MeasuredTerminalType.load, ) - errors += all_valid_ids( + errors += _all_valid_ids( data, component, field="measured_object", ref_components=[ComponentType.sym_gen, ComponentType.asym_gen], measured_terminal_type=MeasuredTerminalType.generator, ) - errors += all_valid_ids( + errors += _all_valid_ids( data, component, field="measured_object", ref_components=ComponentType.three_winding_transformer, measured_terminal_type=MeasuredTerminalType.branch3_1, ) - errors += all_valid_ids( + errors += _all_valid_ids( data, component, field="measured_object", ref_components=ComponentType.three_winding_transformer, measured_terminal_type=MeasuredTerminalType.branch3_2, ) - errors += all_valid_ids( + errors += _all_valid_ids( data, component, field="measured_object", ref_components=ComponentType.three_winding_transformer, measured_terminal_type=MeasuredTerminalType.branch3_3, ) - errors += all_valid_ids( + errors += _all_valid_ids( data, component, field="measured_object", @@ -943,27 +946,27 @@ def validate_generic_power_sensor(data: SingleDataset, component: ComponentType) 
measured_terminal_type=MeasuredTerminalType.node, ) if component in ("sym_power_sensor", "asym_power_sensor"): - errors += valid_p_q_sigma(data, component) + errors += _valid_p_q_sigma(data, component) return errors def validate_fault(data: SingleDataset) -> list[ValidationError]: errors = validate_base(data, ComponentType.fault) - errors += all_boolean(data, ComponentType.fault, "status") - errors += all_valid_enum_values(data, ComponentType.fault, "fault_type", FaultType) - errors += all_valid_enum_values(data, ComponentType.fault, "fault_phase", FaultPhase) - errors += all_valid_fault_phases(data, ComponentType.fault, "fault_type", "fault_phase") - errors += all_valid_ids(data, ComponentType.fault, field="fault_object", ref_components=ComponentType.node) - errors += all_greater_than_or_equal_to_zero(data, ComponentType.fault, "r_f") - errors += all_enabled_identical(data, ComponentType.fault, "fault_type", "status") - errors += all_enabled_identical(data, ComponentType.fault, "fault_phase", "status") + errors += _all_boolean(data, ComponentType.fault, "status") + errors += _all_valid_enum_values(data, ComponentType.fault, "fault_type", FaultType) + errors += _all_valid_enum_values(data, ComponentType.fault, "fault_phase", FaultPhase) + errors += _all_valid_fault_phases(data, ComponentType.fault, "fault_type", "fault_phase") + errors += _all_valid_ids(data, ComponentType.fault, field="fault_object", ref_components=ComponentType.node) + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.fault, "r_f") + errors += _all_enabled_identical(data, ComponentType.fault, "fault_type", "status") + errors += _all_enabled_identical(data, ComponentType.fault, "fault_phase", "status") return errors def validate_regulator(data: SingleDataset, component: ComponentType) -> list[ValidationError]: errors = validate_base(data, component) - errors += all_valid_ids( + errors += _all_valid_ids( data, component, field="regulated_object", @@ -974,11 +977,11 @@ def validate_regulator(data: SingleDataset, component: ComponentType) -> list[Va def validate_transformer_tap_regulator(data: SingleDataset) -> list[ValidationError]: errors = validate_regulator(data, ComponentType.transformer_tap_regulator) - errors += all_boolean(data, ComponentType.transformer_tap_regulator, "status") - errors += all_valid_enum_values( + errors += _all_boolean(data, ComponentType.transformer_tap_regulator, "status") + errors += _all_valid_enum_values( data, ComponentType.transformer_tap_regulator, "control_side", [BranchSide, Branch3Side] ) - errors += all_valid_associated_enum_values( + errors += _all_valid_associated_enum_values( data, ComponentType.transformer_tap_regulator, "control_side", @@ -986,7 +989,7 @@ def validate_transformer_tap_regulator(data: SingleDataset) -> list[ValidationEr [ComponentType.transformer], [BranchSide], ) - errors += all_valid_associated_enum_values( + errors += _all_valid_associated_enum_values( data, ComponentType.transformer_tap_regulator, "control_side", @@ -994,15 +997,15 @@ def validate_transformer_tap_regulator(data: SingleDataset) -> list[ValidationEr [ComponentType.three_winding_transformer], [Branch3Side], ) - errors += all_greater_than_or_equal_to_zero(data, ComponentType.transformer_tap_regulator, "u_set") - errors += all_greater_than_zero(data, ComponentType.transformer_tap_regulator, "u_band") - errors += all_greater_than_or_equal_to_zero( + errors += _all_greater_than_or_equal_to_zero(data, ComponentType.transformer_tap_regulator, "u_set") + errors += 
_all_greater_than_zero(data, ComponentType.transformer_tap_regulator, "u_band") + errors += _all_greater_than_or_equal_to_zero( data, ComponentType.transformer_tap_regulator, "line_drop_compensation_r", 0.0 ) - errors += all_greater_than_or_equal_to_zero( + errors += _all_greater_than_or_equal_to_zero( data, ComponentType.transformer_tap_regulator, "line_drop_compensation_x", 0.0 ) - errors += all_supported_tap_control_side( + errors += _all_supported_tap_control_side( data, ComponentType.transformer_tap_regulator, "control_side", diff --git a/tests/unit/validation/test_input_validation.py b/tests/unit/validation/test_input_validation.py index 15a01ae1d..eab6d2ceb 100644 --- a/tests/unit/validation/test_input_validation.py +++ b/tests/unit/validation/test_input_validation.py @@ -36,7 +36,7 @@ TwoValuesZeroError, UnsupportedTransformerRegulationError, ) -from power_grid_model.validation.utils import nan_type +from power_grid_model.validation.utils import _nan_type @pytest.fixture @@ -98,12 +98,12 @@ def original_data() -> dict[ComponentType, np.ndarray]: transformer["tap_pos"] = [-1, 6, -4] transformer["tap_min"] = [-2, 4, 3] transformer["tap_max"] = [2, -4, -3] - transformer["tap_nom"] = [-3, nan_type("transformer", "tap_nom"), 4] + transformer["tap_nom"] = [-3, _nan_type("transformer", "tap_nom"), 4] transformer["tap_size"] = [262.5, 0.0, -10.0] - transformer["uk_min"] = [0.0000000005, nan_type("transformer", "uk_min"), 0.9] - transformer["uk_max"] = [0.0000000005, nan_type("transformer", "uk_max"), 0.8] - transformer["pk_min"] = [300.0, 0.0, nan_type("transformer", "pk_min")] - transformer["pk_max"] = [400.0, -0.1, nan_type("transformer", "pk_max")] + transformer["uk_min"] = [0.0000000005, _nan_type("transformer", "uk_min"), 0.9] + transformer["uk_max"] = [0.0000000005, _nan_type("transformer", "uk_max"), 0.8] + transformer["pk_min"] = [300.0, 0.0, _nan_type("transformer", "pk_min")] + transformer["pk_max"] = [400.0, -0.1, _nan_type("transformer", "pk_max")] three_winding_transformer = initialize_array(DatasetType.input, ComponentType.three_winding_transformer, 4) three_winding_transformer["id"] = [1, 28, 29, 30] @@ -137,49 +137,49 @@ def original_data() -> dict[ComponentType, np.ndarray]: three_winding_transformer["tap_min"] = [-10, -10, -10, -10] three_winding_transformer["tap_max"] = [10, 10, 10, 10] three_winding_transformer["tap_size"] = [-12, 0, 3, 130] - three_winding_transformer["tap_nom"] = [-12, 41, nan_type("three_winding_transformer", "tap_nom"), 0] + three_winding_transformer["tap_nom"] = [-12, 41, _nan_type("three_winding_transformer", "tap_nom"), 0] three_winding_transformer["uk_12_min"] = [ - nan_type("three_winding_transformer", "uk_12_min"), + _nan_type("three_winding_transformer", "uk_12_min"), 1.1, 0.05, - nan_type("three_winding_transformer", "uk_12_min"), + _nan_type("three_winding_transformer", "uk_12_min"), ] three_winding_transformer["uk_13_min"] = [ - nan_type("three_winding_transformer", "uk_13_min"), + _nan_type("three_winding_transformer", "uk_13_min"), 1.2, 0.3, - nan_type("three_winding_transformer", "uk_13_min"), + _nan_type("three_winding_transformer", "uk_13_min"), ] three_winding_transformer["uk_23_min"] = [ - nan_type("three_winding_transformer", "uk_23_min"), + _nan_type("three_winding_transformer", "uk_23_min"), 1, 0.15, - nan_type("three_winding_transformer", "uk_23_min"), + _nan_type("three_winding_transformer", "uk_23_min"), ] - three_winding_transformer["pk_12_min"] = [-450, nan_type("three_winding_transformer", "pk_12_min"), 10, 40] - 
three_winding_transformer["pk_13_min"] = [-40, nan_type("three_winding_transformer", "pk_13_min"), 40, 50] - three_winding_transformer["pk_23_min"] = [-120, nan_type("three_winding_transformer", "pk_23_min"), 40, 30] + three_winding_transformer["pk_12_min"] = [-450, _nan_type("three_winding_transformer", "pk_12_min"), 10, 40] + three_winding_transformer["pk_13_min"] = [-40, _nan_type("three_winding_transformer", "pk_13_min"), 40, 50] + three_winding_transformer["pk_23_min"] = [-120, _nan_type("three_winding_transformer", "pk_23_min"), 40, 30] three_winding_transformer["uk_12_max"] = [ - nan_type("three_winding_transformer", "uk_12_max"), + _nan_type("three_winding_transformer", "uk_12_max"), 1.1, 0.05, - nan_type("three_winding_transformer", "uk_12_max"), + _nan_type("three_winding_transformer", "uk_12_max"), ] three_winding_transformer["uk_13_max"] = [ - nan_type("three_winding_transformer", "uk_13_max"), + _nan_type("three_winding_transformer", "uk_13_max"), 1.2, 0.3, - nan_type("three_winding_transformer", "uk_13_max"), + _nan_type("three_winding_transformer", "uk_13_max"), ] three_winding_transformer["uk_23_max"] = [ - nan_type("three_winding_transformer", "uk_23_max"), + _nan_type("three_winding_transformer", "uk_23_max"), 1, 0.15, - nan_type("three_winding_transformer", "uk_23_max"), + _nan_type("three_winding_transformer", "uk_23_max"), ] - three_winding_transformer["pk_12_max"] = [-450, nan_type("three_winding_transformer", "pk_12_max"), 10, 40] - three_winding_transformer["pk_13_max"] = [-40, nan_type("three_winding_transformer", "pk_12_max"), 40, 50] - three_winding_transformer["pk_23_max"] = [-120, nan_type("three_winding_transformer", "pk_12_max"), 40, 30] + three_winding_transformer["pk_12_max"] = [-450, _nan_type("three_winding_transformer", "pk_12_max"), 10, 40] + three_winding_transformer["pk_13_max"] = [-40, _nan_type("three_winding_transformer", "pk_12_max"), 40, 50] + three_winding_transformer["pk_23_max"] = [-120, _nan_type("three_winding_transformer", "pk_12_max"), 40, 30] transformer_tap_regulator = initialize_array(DatasetType.input, ComponentType.transformer_tap_regulator, 5) transformer_tap_regulator["id"] = [51, 52, 53, 54, 1] @@ -261,11 +261,13 @@ def original_data() -> dict[ComponentType, np.ndarray]: fault = initialize_array(DatasetType.input, ComponentType.fault, 20) fault["id"] = [1] + list(range(32, 51)) fault["status"] = [0, -1, 2] + 17 * [1] - fault["fault_type"] = 6 * [0] + 4 * [1] + 4 * [2] + 4 * [3] + [nan_type("fault", "fault_type"), 4] - fault["fault_phase"] = list(range(1, 7)) + [0, 4, 5, 6] + 2 * list(range(4)) + [nan_type("fault", "fault_phase"), 7] + fault["fault_type"] = 6 * [0] + 4 * [1] + 4 * [2] + 4 * [3] + [_nan_type("fault", "fault_type"), 4] + fault["fault_phase"] = ( + list(range(1, 7)) + [0, 4, 5, 6] + 2 * list(range(4)) + [_nan_type("fault", "fault_phase"), 7] + ) fault["fault_object"] = [200, 3] + list(range(10, 28, 2)) + 9 * [0] - fault["r_f"] = [-1.0, 0.0, 1.0] + 17 * [nan_type("fault", "r_f")] - fault["x_f"] = [-1.0, 0.0, 1.0] + 17 * [nan_type("fault", "x_f")] + fault["r_f"] = [-1.0, 0.0, 1.0] + 17 * [_nan_type("fault", "r_f")] + fault["x_f"] = [-1.0, 0.0, 1.0] + 17 * [_nan_type("fault", "x_f")] data = { ComponentType.node: node, @@ -654,7 +656,7 @@ def test_fault(input_data): ComponentType.fault, "fault_type", list(range(32, 51)), - 5 * [0] + 4 * [1] + 4 * [2] + 4 * [3] + [nan_type("fault", "fault_type"), 4], + 5 * [0] + 4 * [1] + 4 * [2] + 4 * [3] + [_nan_type("fault", "fault_type"), 4], ) in validation_errors ) @@ -663,7 +665,7 @@ 
def test_fault(input_data): ComponentType.fault, "fault_phase", list(range(32, 51)), - list(range(2, 7)) + [0, 4, 5, 6] + 2 * list(range(4)) + [nan_type("fault", "fault_phase"), 7], + list(range(2, 7)) + [0, 4, 5, 6] + 2 * list(range(4)) + [_nan_type("fault", "fault_phase"), 7], ) in validation_errors ) diff --git a/tests/unit/validation/test_utils.py b/tests/unit/validation/test_utils.py index d2879dbaf..95634e960 100644 --- a/tests/unit/validation/test_utils.py +++ b/tests/unit/validation/test_utils.py @@ -7,39 +7,39 @@ from power_grid_model import initialize_array from power_grid_model.validation.errors import NotGreaterThanError -from power_grid_model.validation.utils import errors_to_string, eval_field_expression, update_input_data +from power_grid_model.validation.utils import _eval_field_expression, _update_input_data, errors_to_string def test_eval_field_expression(): data = np.array([(1.0, 2.0, 4.0), (8.0, 16.0, 0.0)], dtype=[("a", "f8"), ("b2", "f8"), ("c_3", "f8")]) - np.testing.assert_array_equal(eval_field_expression(data, "a"), np.array([1, 8])) - np.testing.assert_array_equal(eval_field_expression(data, "a/b2"), np.array([0.5, 0.5])) - np.testing.assert_array_equal(eval_field_expression(data, "a / b2"), np.array([0.5, 0.5])) - np.testing.assert_array_equal(eval_field_expression(data, "a / c_3"), np.array([0.25, np.nan])) + np.testing.assert_array_equal(_eval_field_expression(data, "a"), np.array([1, 8])) + np.testing.assert_array_equal(_eval_field_expression(data, "a/b2"), np.array([0.5, 0.5])) + np.testing.assert_array_equal(_eval_field_expression(data, "a / b2"), np.array([0.5, 0.5])) + np.testing.assert_array_equal(_eval_field_expression(data, "a / c_3"), np.array([0.25, np.nan])) with pytest.raises(ValueError): - eval_field_expression(data, "a / 1") + _eval_field_expression(data, "a / 1") with pytest.raises(ValueError): - eval_field_expression(data, "a + 100") + _eval_field_expression(data, "a + 100") with pytest.raises(ValueError): - eval_field_expression(data, "a + 100.123") + _eval_field_expression(data, "a + 100.123") with pytest.raises(ValueError): - eval_field_expression(data, "a + b2") + _eval_field_expression(data, "a + b2") with pytest.raises(ValueError): - eval_field_expression(data, "a - b2") + _eval_field_expression(data, "a - b2") with pytest.raises(ValueError): - eval_field_expression(data, "a * b2") + _eval_field_expression(data, "a * b2") with pytest.raises(ValueError): - eval_field_expression(data, "a * -b2") + _eval_field_expression(data, "a * -b2") with pytest.raises(ValueError): - eval_field_expression(data, "a + b2 + c_3") + _eval_field_expression(data, "a + b2 + c_3") with pytest.raises(ValueError): - eval_field_expression(data, "max(a, b2)") + _eval_field_expression(data, "max(a, b2)") with pytest.raises(ValueError): - eval_field_expression(data, "(a + b2) / c_3") + _eval_field_expression(data, "(a + b2) / c_3") with pytest.raises(KeyError): - eval_field_expression(data, "a / b") + _eval_field_expression(data, "a / b") def assert_list_of_numpy_arrays_equal(expected, actual): @@ -70,7 +70,7 @@ def test_update_input_data(): update_test["id"] = [6, 5, 2, 3] update_test["q_specified"] = [np.nan, 5.2, np.nan, 3.2] - merged = update_input_data(input_data={"sym_load": input_test}, update_data={"sym_load": update_test}) + merged = _update_input_data(input_data={"sym_load": input_test}, update_data={"sym_load": update_test}) np.testing.assert_array_equal(merged["sym_load"]["id"], [4, 5, 6, 1, 2, 3]) 
np.testing.assert_array_equal(merged["sym_load"]["p_specified"], [4.0, 5.0, 6.0, 1.0, 2.0, 3.0]) np.testing.assert_array_equal(merged["sym_load"]["q_specified"], [4.1, 5.2, 6.1, np.nan, np.nan, 3.2]) @@ -85,7 +85,7 @@ def test_update_input_data__without_ids(): update_test = initialize_array("update", "sym_load", 6) input_test["q_specified"] = [4.1, 5.2, np.nan, np.nan, np.nan, 3.2] - merged = update_input_data(input_data={"sym_load": input_test}, update_data={"sym_load": update_test}) + merged = _update_input_data(input_data={"sym_load": input_test}, update_data={"sym_load": update_test}) np.testing.assert_array_equal(merged["sym_load"]["id"], [4, 5, 6, 1, 2, 3]) np.testing.assert_array_equal(merged["sym_load"]["p_specified"], [4.0, 5.0, 6.0, 1.0, 2.0, 3.0]) np.testing.assert_array_equal(merged["sym_load"]["q_specified"], [4.1, 5.2, np.nan, np.nan, np.nan, 3.2]) @@ -100,7 +100,7 @@ def test_update_input_data_int_nan(): update_line["id"] = [1, 3] update_line["from_status"] = [-128, 1] - merged = update_input_data(input_data={"line": input_line}, update_data={"line": update_line}) + merged = _update_input_data(input_data={"line": input_line}, update_data={"line": update_line}) np.testing.assert_array_equal(merged["line"]["from_status"], [0, -128, 1]) @@ -114,7 +114,7 @@ def test_update_input_data_asym_nans(): update_load["id"] = [1, 2, 3] update_load["p_specified"] = [[np.nan, np.nan, np.nan], [np.nan, np.nan, 5.3], [6.1, 6.2, 6.3]] - merged = update_input_data(input_data={"asym_load": input_load}, update_data={"asym_load": update_load}) + merged = _update_input_data(input_data={"asym_load": input_load}, update_data={"asym_load": update_load}) np.testing.assert_array_equal( merged["asym_load"]["p_specified"], [[1.1, 1.2, 1.3], [2.1, np.nan, 5.3], [6.1, 6.2, 6.3]] diff --git a/tests/unit/validation/test_validation_functions.py b/tests/unit/validation/test_validation_functions.py index 6eebc5c44..6349106a9 100644 --- a/tests/unit/validation/test_validation_functions.py +++ b/tests/unit/validation/test_validation_functions.py @@ -11,14 +11,7 @@ from power_grid_model import CalculationType, LoadGenType, MeasuredTerminalType, initialize_array, power_grid_meta_data from power_grid_model._core.dataset_definitions import ComponentType, DatasetType from power_grid_model._utils import compatibility_convert_row_columnar_dataset -from power_grid_model.enum import ( - Branch3Side, - BranchSide, - CalculationType, - ComponentAttributeFilterOptions, - FaultType, - TapChangingStrategy, -) +from power_grid_model.enum import Branch3Side, BranchSide, CalculationType, ComponentAttributeFilterOptions, FaultType from power_grid_model.validation import assert_valid_input_data from power_grid_model.validation.errors import ( IdNotInDatasetError, @@ -649,17 +642,17 @@ def single_component_twice_data(): @pytest.mark.parametrize("measured_terminal_type", MeasuredTerminalType) @patch("power_grid_model.validation.validation.validate_base", new=MagicMock()) -@patch("power_grid_model.validation.validation.all_greater_than_zero", new=MagicMock()) -@patch("power_grid_model.validation.validation.all_valid_enum_values", new=MagicMock()) -@patch("power_grid_model.validation.validation.all_valid_ids") +@patch("power_grid_model.validation.validation._all_greater_than_zero", new=MagicMock()) +@patch("power_grid_model.validation.validation._all_valid_enum_values", new=MagicMock()) +@patch("power_grid_model.validation.validation._all_valid_ids") def test_validate_generic_power_sensor__all_terminal_types( - all_valid_ids: MagicMock, 
measured_terminal_type: MeasuredTerminalType + _all_valid_ids: MagicMock, measured_terminal_type: MeasuredTerminalType ): # Act validate_generic_power_sensor(data={}, component="") # type: ignore # Assert - all_valid_ids.assert_any_call( + _all_valid_ids.assert_any_call( ANY, ANY, field=ANY, ref_components=ANY, measured_terminal_type=measured_terminal_type ) @@ -680,17 +673,17 @@ def test_validate_generic_power_sensor__all_terminal_types( ], ) @patch("power_grid_model.validation.validation.validate_base", new=MagicMock()) -@patch("power_grid_model.validation.validation.all_greater_than_zero", new=MagicMock()) -@patch("power_grid_model.validation.validation.all_valid_enum_values", new=MagicMock()) -@patch("power_grid_model.validation.validation.all_valid_ids") +@patch("power_grid_model.validation.validation._all_greater_than_zero", new=MagicMock()) +@patch("power_grid_model.validation.validation._all_valid_enum_values", new=MagicMock()) +@patch("power_grid_model.validation.validation._all_valid_ids") def test_validate_generic_power_sensor__terminal_types( - all_valid_ids: MagicMock, ref_component: str | list[str], measured_terminal_type: MeasuredTerminalType + _all_valid_ids: MagicMock, ref_component: str | list[str], measured_terminal_type: MeasuredTerminalType ): # Act validate_generic_power_sensor(data={}, component="") # type: ignore # Assert - all_valid_ids.assert_any_call( + _all_valid_ids.assert_any_call( ANY, ANY, field=ANY, ref_components=ref_component, measured_terminal_type=measured_terminal_type )
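[Reviewer note, not part of the patch] The net effect of this refactor is a visibility change only: every validation helper gains a leading underscore, while errors_to_string keeps its public name (the updated test imports above still use it without a prefix). Downstream code that imported helpers such as nan_type or get_indexer directly will break and should switch to the public entry points instead. A minimal sketch of the supported usage, with a made-up two-node dataset:

    from power_grid_model import ComponentType, DatasetType, initialize_array
    from power_grid_model.validation import validate_input_data
    from power_grid_model.validation.utils import errors_to_string  # still public

    # Hypothetical input: two nodes rated at 10.5 kV, just to exercise the API.
    node = initialize_array(DatasetType.input, ComponentType.node, 2)
    node["id"] = [1, 2]
    node["u_rated"] = [10.5e3, 10.5e3]

    errors = validate_input_data({ComponentType.node: node})
    print(errors_to_string(errors) if errors else "input data is valid")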