power-grid-model-ds 0.0.1a11709467271__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- power_grid_model_ds/__init__.py +9 -0
- power_grid_model_ds/_core/__init__.py +0 -0
- power_grid_model_ds/_core/data_source/__init__.py +0 -0
- power_grid_model_ds/_core/data_source/generator/__init__.py +0 -0
- power_grid_model_ds/_core/data_source/generator/arrays/__init__.py +0 -0
- power_grid_model_ds/_core/data_source/generator/arrays/base.py +25 -0
- power_grid_model_ds/_core/data_source/generator/arrays/line.py +133 -0
- power_grid_model_ds/_core/data_source/generator/arrays/node.py +37 -0
- power_grid_model_ds/_core/data_source/generator/arrays/source.py +30 -0
- power_grid_model_ds/_core/data_source/generator/arrays/transformer.py +37 -0
- power_grid_model_ds/_core/data_source/generator/grid_generators.py +78 -0
- power_grid_model_ds/_core/fancypy.py +66 -0
- power_grid_model_ds/_core/load_flow.py +140 -0
- power_grid_model_ds/_core/model/__init__.py +0 -0
- power_grid_model_ds/_core/model/arrays/__init__.py +43 -0
- power_grid_model_ds/_core/model/arrays/base/__init__.py +0 -0
- power_grid_model_ds/_core/model/arrays/base/_build.py +166 -0
- power_grid_model_ds/_core/model/arrays/base/_filters.py +115 -0
- power_grid_model_ds/_core/model/arrays/base/_modify.py +64 -0
- power_grid_model_ds/_core/model/arrays/base/_optional.py +11 -0
- power_grid_model_ds/_core/model/arrays/base/_string.py +94 -0
- power_grid_model_ds/_core/model/arrays/base/array.py +325 -0
- power_grid_model_ds/_core/model/arrays/base/errors.py +17 -0
- power_grid_model_ds/_core/model/arrays/pgm_arrays.py +122 -0
- power_grid_model_ds/_core/model/constants.py +27 -0
- power_grid_model_ds/_core/model/containers/__init__.py +0 -0
- power_grid_model_ds/_core/model/containers/base.py +244 -0
- power_grid_model_ds/_core/model/containers/grid_protocol.py +22 -0
- power_grid_model_ds/_core/model/dtypes/__init__.py +0 -0
- power_grid_model_ds/_core/model/dtypes/appliances.py +39 -0
- power_grid_model_ds/_core/model/dtypes/branches.py +117 -0
- power_grid_model_ds/_core/model/dtypes/id.py +19 -0
- power_grid_model_ds/_core/model/dtypes/nodes.py +27 -0
- power_grid_model_ds/_core/model/dtypes/regulators.py +30 -0
- power_grid_model_ds/_core/model/dtypes/sensors.py +63 -0
- power_grid_model_ds/_core/model/enums/__init__.py +0 -0
- power_grid_model_ds/_core/model/enums/nodes.py +16 -0
- power_grid_model_ds/_core/model/graphs/__init__.py +0 -0
- power_grid_model_ds/_core/model/graphs/container.py +158 -0
- power_grid_model_ds/_core/model/graphs/errors.py +19 -0
- power_grid_model_ds/_core/model/graphs/models/__init__.py +7 -0
- power_grid_model_ds/_core/model/graphs/models/_rustworkx_search.py +63 -0
- power_grid_model_ds/_core/model/graphs/models/base.py +326 -0
- power_grid_model_ds/_core/model/graphs/models/rustworkx.py +119 -0
- power_grid_model_ds/_core/model/grids/__init__.py +0 -0
- power_grid_model_ds/_core/model/grids/_text_sources.py +119 -0
- power_grid_model_ds/_core/model/grids/base.py +434 -0
- power_grid_model_ds/_core/model/grids/helpers.py +122 -0
- power_grid_model_ds/_core/utils/__init__.py +0 -0
- power_grid_model_ds/_core/utils/misc.py +41 -0
- power_grid_model_ds/_core/utils/pickle.py +47 -0
- power_grid_model_ds/_core/utils/zip.py +72 -0
- power_grid_model_ds/arrays.py +39 -0
- power_grid_model_ds/constants.py +7 -0
- power_grid_model_ds/enums.py +7 -0
- power_grid_model_ds/errors.py +27 -0
- power_grid_model_ds/fancypy.py +9 -0
- power_grid_model_ds/generators.py +11 -0
- power_grid_model_ds/graph_models.py +8 -0
- power_grid_model_ds-0.0.1a11709467271.dist-info/LICENSE +292 -0
- power_grid_model_ds-0.0.1a11709467271.dist-info/METADATA +80 -0
- power_grid_model_ds-0.0.1a11709467271.dist-info/RECORD +64 -0
- power_grid_model_ds-0.0.1a11709467271.dist-info/WHEEL +5 -0
- power_grid_model_ds-0.0.1a11709467271.dist-info/top_level.txt +1 -0

power_grid_model_ds/_core/model/arrays/base/_build.py
@@ -0,0 +1,166 @@
+# SPDX-FileCopyrightText: Contributors to the Power Grid Model project <powergridmodel@lfenergy.org>
+#
+# SPDX-License-Identifier: MPL-2.0
+
+"""Contains the build_array function."""
+
+import logging
+from collections.abc import Sized
+from typing import Any, Iterable
+
+import numpy as np
+
+from power_grid_model_ds._core.model.constants import empty
+from power_grid_model_ds._core.utils.misc import is_sequence
+
+
+def build_array(*args: tuple[Any], dtype: np.dtype, defaults: dict[str, np.generic], **kwargs) -> np.ndarray:
+    """Constructs the array from the given args/kwargs."""
+    parsed_input, size = _parse_input(*args, dtype=dtype, **kwargs)
+
+    array: np.ndarray = np.zeros(size, dtype=dtype)
+    _fill_defaults(array, defaults)
+
+    if not size:
+        return array
+
+    if isinstance(parsed_input, np.ndarray) and parsed_input.dtype.names:
+        _check_missing_columns(array.dtype.names, defaults, set(parsed_input.dtype.names))
+        return _parse_structured_array(parsed_input, array)
+    if isinstance(parsed_input, np.ndarray):
+        # Note: defaults are not supported when working with unstructured arrays
+        return _parse_array(parsed_input, array.dtype)
+
+    _check_missing_columns(array.dtype.names, defaults, set(parsed_input.keys()))
+    _fill_with_kwargs(array, parsed_input)
+    return array
+
+
+def _parse_input(*args: Any, dtype: np.dtype, **kwargs):
+    """Combines the args and kwargs to a dict."""
+    columns: list[str] = list(dtype.names) if dtype.names else []
+    if args and kwargs:
+        raise TypeError("Cannot construct from both args and kwargs")
+
+    if args and isinstance(args[0], np.ndarray):
+        return args[0], len(args[0])
+    if args and isinstance(args[0], Iterable):
+        kwargs = _args2kwargs(args, columns)
+    elif args:
+        raise TypeError(f"Invalid args: {args}")
+
+    if kwargs:
+        return _parse_kwargs(kwargs, columns)
+    return {}, 0
+
+
+def _check_missing_columns(array_columns: tuple, defaults: dict[str, np.generic], provided_columns: set[str]):
+    required_columns = set(array_columns) - set(defaults.keys())
+    if missing_columns := required_columns - provided_columns:
+        raise ValueError(f"Missing required columns: {missing_columns}")
+
+
+def _fill_defaults(array: np.ndarray, defaults: dict[str, np.generic]):
+    """Fills the defaults into the array."""
+    for column, default in defaults.items():
+        if default is empty:
+            array[column] = empty(array.dtype[column])  # type: ignore[call-overload]
+        else:
+            array[column] = default  # type: ignore[call-overload]
+
+
+def _fill_with_kwargs(array: np.ndarray, kwargs: dict[str, np.ndarray]):
+    """Fills the kwargs into the array."""
+    for column, values in kwargs.items():
+        array[column] = values  # type: ignore[call-overload]
+
+
+def _parse_structured_array(from_array: np.ndarray, to_array: np.ndarray) -> np.ndarray:
+    shared_columns, ignored_columns = _determine_column_overlap(from_array, to_array)
+    if ignored_columns:
+        logging.debug(
+            "Ignored provided columns %s during build of array with columns %s", ignored_columns, to_array.dtype.names
+        )
+    to_array[shared_columns] = from_array[shared_columns]  # type: ignore[index]
+    return to_array
+
+
+def _determine_column_overlap(from_array: np.ndarray, to_array: np.ndarray) -> tuple[list[str], list[str]]:
+    """Returns two lists: columns present in both arrays and the columns that are only present in from_array"""
+    from_columns = set(from_array.dtype.names)
+    to_columns = set(to_array.dtype.names)
+
+    return list(from_columns & to_columns), list(from_columns - to_columns)
+
+
+def _parse_array(array: np.ndarray, dtype: np.dtype):
+    if len(array.shape) == 1 and array.dtype == dtype:
+        return array
+    if len(array.shape) == 1:
+        return np.array(array, dtype=dtype)
+    if len(array.shape) == 2:
+        return _parse_2d_array(array, dtype)
+    raise NotImplementedError(f"Unsupported array shape {array.shape}")
+
+
+def _parse_2d_array(array: np.ndarray, dtype: np.dtype):
+    """Parses the 2d array to a 1d array."""
+    columns: list[str] = list(dtype.names) if dtype.names else []
+    if len(columns) not in array.shape:
+        raise ValueError(f"Cannot convert array of shape {array.shape} into {len(columns)} columns.")
+    column_dim = 0 if len(columns) == array.shape[0] else 1
+    size_dim = 1 if column_dim == 0 else 0
+    new_array = np.ones(array.shape[size_dim], dtype=dtype)
+    for index, column in enumerate(columns):
+        if column_dim == 0:
+            new_array[column] = array[index, :]  # type: ignore[call-overload]
+        else:
+            new_array[column] = array[:, index]  # type: ignore[call-overload]
+    return new_array
+
+
+def _parse_kwargs(kwargs: dict[str, list | np.ndarray], columns: list[str]) -> tuple[dict[str, np.ndarray], int]:
+    """Parses the kwargs to a dict of np.ndarrays."""
+    parsed_kwargs = {}
+
+    size = 0
+    for column, values in kwargs.items():
+        parsed_kwargs[column] = np.array(values).flatten()
+
+        value_size = _get_size(values)
+        if size == 0:
+            size = value_size
+        elif size != len(values):
+            raise ValueError(f"Size of column '{column}' does not match other columns.")
+
+    if invalid_columns := set(parsed_kwargs.keys()) - set(columns):
+        raise ValueError(f"Invalid columns: {invalid_columns}")
+    return parsed_kwargs, size
+
+
+def _get_size(values: Sized):
+    """Returns the size of the values."""
+    if is_sequence(values):
+        return len(values)
+    return 1
+
+
+def _args2kwargs(args: tuple[Any, ...], columns: list[str]) -> dict[str, list]:
+    """Parses the args to kwargs."""
+    kwargs = {}
+    if len(args) == 1:
+        args = args[0]
+
+    args_as_array = np.array(args)
+    if len(args_as_array.shape) != 2:
+        raise ValueError(
+            "Cannot parse args: input is not 2D, probably due to an inconsistent number of values per row."
+        )
+
+    _, args_n_columns = args_as_array.shape
+    if args_n_columns != len(columns):
+        raise ValueError(f"Cannot parse args: requires {len(columns)} columns per row, got {args_n_columns}.")
+
+    for index, column in enumerate(columns):
+        kwargs[column] = [row[index] for row in args]
+    return kwargs
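
Not part of the diff above: a minimal sketch of how build_array could be called. The structured dtype and the defaults dict are illustrative assumptions (the package's real dtypes live in the dtypes/ modules, and _build.py is an internal _core module); the sketch only shows the two input paths the function handles, keyword columns and positional rows.

import numpy as np

from power_grid_model_ds._core.model.arrays.base._build import build_array

# Hypothetical two-column dtype and defaults, for illustration only.
node_dtype = np.dtype([("id", np.int64), ("u_rated", np.float64)])
defaults = {"u_rated": np.float64(10_500.0)}

# From keyword arguments: "id" is provided, "u_rated" is filled from defaults.
from_kwargs = build_array(dtype=node_dtype, defaults=defaults, id=[1, 2, 3])

# From positional rows: each row must supply a value for every column.
from_rows = build_array([[1, 10_500.0], [2, 10_500.0]], dtype=node_dtype, defaults=defaults)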

power_grid_model_ds/_core/model/arrays/base/_filters.py
@@ -0,0 +1,115 @@
+# SPDX-FileCopyrightText: Contributors to the Power Grid Model project <powergridmodel@lfenergy.org>
+#
+# SPDX-License-Identifier: MPL-2.0
+
+from typing import Any, Iterable, Literal
+
+import numpy as np
+from numpy.typing import NDArray
+
+from power_grid_model_ds._core.model.arrays.base.errors import MultipleRecordsReturned, RecordDoesNotExist
+from power_grid_model_ds._core.utils.misc import is_sequence
+
+
+def get_filter_mask(
+    *args: int | Iterable[int] | np.ndarray,
+    array: np.ndarray,
+    mode_: Literal["AND", "OR"],
+    **kwargs: Any | list[Any] | np.ndarray,
+) -> np.ndarray:
+    """Returns a mask that matches the input parameters."""
+    parsed_kwargs = _parse(args, kwargs)
+
+    if invalid_kwargs := set(parsed_kwargs.keys()) - set(array.dtype.names):
+        raise ValueError(f"Invalid kwargs: {invalid_kwargs}")
+
+    filter_mask = _initialize_filter_mask(mode_, array.size)
+    for field, values in parsed_kwargs.items():
+        field_mask = _build_filter_mask_for_field(array, field, values)
+        if mode_ == "AND":
+            filter_mask &= field_mask
+        elif mode_ == "OR":
+            filter_mask |= field_mask
+        else:
+            raise ValueError(f"Invalid mode: {mode_}, must be 'AND' or 'OR'")
+    return filter_mask
+
+
+def apply_filter(
+    *args: int | Iterable[int] | np.ndarray,
+    array: np.ndarray,
+    mode_: Literal["AND", "OR"],
+    **kwargs: Any | list[Any] | np.ndarray,
+) -> np.ndarray:
+    """Return an array with the records that match the input parameters.
+    Note: output could be an empty array."""
+    filter_mask = get_filter_mask(*args, array=array, mode_=mode_, **kwargs)
+    return array[filter_mask]
+
+
+def apply_exclude(
+    *args: int | Iterable[int] | np.ndarray,
+    array: np.ndarray,
+    mode_: Literal["AND", "OR"],
+    **kwargs: Any | list[Any] | np.ndarray,
+) -> np.ndarray:
+    """Return an array without records that match the input parameters.
+    Note: output could be an empty array."""
+    filter_mask = get_filter_mask(*args, array=array, mode_=mode_, **kwargs)
+    return array[~filter_mask]
+
+
+def apply_get(
+    *args: int | Iterable[int] | np.ndarray,
+    array: np.ndarray,
+    mode_: Literal["AND", "OR"],
+    **kwargs: Any | list[Any] | np.ndarray,
+) -> np.ndarray:
+    """Returns a record that matches the input parameters.
+    If no or multiple records match the input parameters, an error is raised.
+    """
+    filtered_array = apply_filter(*args, array=array, mode_=mode_, **kwargs)
+    if filtered_array.size == 1:
+        return filtered_array
+
+    args_str = f"\n\twith args: {args[0]}" if args else ""
+    kwargs_str = f"\n\twith kwargs: {kwargs}" if kwargs else ""
+    if filtered_array.size == 0:
+        raise RecordDoesNotExist(f"No record found! {args_str}{kwargs_str}")
+    raise MultipleRecordsReturned(f"Found more than one record! {args_str}{kwargs_str}")
+
+
+def _build_filter_mask_for_field(array: np.ndarray, field: str, values) -> np.ndarray:
+    if not is_sequence(values):
+        # Note: is_sequence() does not consider a string as a sequence.
+        values = [values]
+
+    if not len(values):  # pylint: disable=use-implicit-booleaness-not-len
+        return np.full(array.size, False)
+    if isinstance(values, set):
+        values = list(values)
+    if len(values) == 1:  # speed-up for single value
+        return array[field] == values[0]
+    return np.isin(array[field], values)
+
+
+def _parse(args: tuple[int | Iterable[int] | NDArray, ...] | NDArray[np.int64], kwargs):
+    if not args and not kwargs:
+        raise TypeError("No input provided.")
+    if len(args) > 1:
+        raise ValueError("Cannot parse more than 1 positional argument.")
+    if len(args) == 1 and "id" in kwargs:
+        raise ValueError("Cannot parse both positional argument and keyword argument 'id'.")
+    if len(args) == 1 and isinstance(args[0], int):
+        kwargs.update({"id": args})
+    elif len(args) == 1:
+        kwargs.update({"id": args[0]})
+    return kwargs
+
+
+def _initialize_filter_mask(mode_: Literal["AND", "OR"], size: int) -> np.ndarray:
+    if mode_ == "AND":
+        return np.full(size, True)
+    if mode_ == "OR":
+        return np.full(size, False)
+    raise ValueError(f"Invalid mode: {mode_}, must be 'AND' or 'OR'")

power_grid_model_ds/_core/model/arrays/base/_modify.py
@@ -0,0 +1,64 @@
+# SPDX-FileCopyrightText: Contributors to the Power Grid Model project <powergridmodel@lfenergy.org>
+#
+# SPDX-License-Identifier: MPL-2.0
+
+"""Helper functions for arrays"""
+
+import numpy as np
+from numpy.typing import ArrayLike, NDArray
+
+
+def re_order(array: np.ndarray, new_order: ArrayLike, column: str = "id") -> np.ndarray:
+    """Re-order an id-array by the id column so that it follows a new_order.
+    Expects the new_order input to contain the same values as self.id
+    """
+    if column not in array.dtype.names:
+        raise ValueError(f"Cannot re-order array: column {column} does not exist.")
+    if not np.array_equal(np.sort(array[column]), np.sort(new_order)):
+        raise ValueError(f"Cannot re-order array: mismatch between new_order and values in '{column}'-column.")
+
+    permutation_a = np.argsort(array[column])
+    permutation_b = np.argsort(new_order)
+    inverse = np.empty_like(new_order, dtype=int)
+    inverse[permutation_b] = np.arange(permutation_b.size)
+    new_order_indices = permutation_a[inverse]
+    return array[new_order_indices]
+
+
+def update_by_id(array: np.ndarray, ids: ArrayLike, allow_missing: bool, **kwargs) -> NDArray[np.bool_]:
+    """Update values in an array by id
+
+    Args:
+        array: the array to update
+        ids: the ids to update
+        allow_missing: whether to allow ids that do not exist in the array
+        **kwargs: the columns to update and their new values
+    Returns:
+        mask: the mask on the original array for the provided ids
+    """
+    mask = np.isin(array["id"], ids)
+    if not allow_missing:
+        nr_hits = np.sum(mask)
+        nr_ids = np.unique(ids).size  # ignore edge cases with duplicate ids
+        if nr_hits != nr_ids:
+            raise ValueError("One or more ids do not exist. Provide allow_missing=True if this is intended.")
+
+    for name, values in kwargs.items():
+        array[name][mask] = values
+    return mask
+
+
+def check_ids(array: np.ndarray, return_duplicates: bool = False) -> NDArray | None:
+    """Check for duplicate ids within the array"""
+    if "id" not in array.dtype.names:
+        raise AttributeError("Array has no 'id' column.")
+
+    unique, counts = np.unique(array["id"], return_counts=True)
+    duplicate_mask = counts > 1
+    duplicates = unique[duplicate_mask]
+
+    if return_duplicates:
+        return duplicates
+    if duplicates.size > 0:
+        raise ValueError(f"Found duplicate ids in array: {duplicates}")
+    return None

power_grid_model_ds/_core/model/arrays/base/_optional.py
@@ -0,0 +1,11 @@
+# SPDX-FileCopyrightText: Contributors to the Power Grid Model project <powergridmodel@lfenergy.org>
+#
+# SPDX-License-Identifier: MPL-2.0
+
+"""Optional dependencies for the arrays module."""
+
+try:
+    # pylint: disable=unused-import
+    import pandas
+except ImportError:
+    pandas = None

power_grid_model_ds/_core/model/arrays/base/_string.py
@@ -0,0 +1,94 @@
+# SPDX-FileCopyrightText: Contributors to the Power Grid Model project <powergridmodel@lfenergy.org>
+#
+# SPDX-License-Identifier: MPL-2.0
+
+"""Module for array to string conversion."""
+
+from typing import TYPE_CHECKING, Optional
+
+import numpy as np
+
+from power_grid_model_ds._core import fancypy as fp
+
+if TYPE_CHECKING:
+    from power_grid_model_ds._core.model.arrays.base.array import FancyArray
+
+
+def convert_array_to_string(array: "FancyArray", rows: int = 10, column_width: int | str = "auto") -> str:
+    """Return a string representation of the array as a table.
+
+    Args:
+        column_width: the width of each column in the table. Will be determined by its contents if set to "auto".
+        rows: the number of rows to show. If the array is larger than this, the middle rows are hidden.
+    """
+    start_rows, end_rows = _get_start_and_end_rows(array, rows)
+    if end_rows is not None:
+        rows_to_print = fp.concatenate(start_rows, end_rows)
+    else:
+        rows_to_print = start_rows
+
+    match column_width:
+        case "auto":
+            column_widths = _determine_column_widths(rows_to_print)
+        case int():
+            column_widths = [(column, column_width) for column in array.dtype.names]
+        case _:
+            raise NotImplementedError(f"column_width={column_width} is not supported. Use 'auto' or int.")
+
+    header = "|".join(f"{_center_and_truncate(column, width)}" for column, width in column_widths) + "\n"
+    if end_rows is None:
+        body = _rows_to_strings(rows_to_print, column_widths)
+        return header + "\n".join(body)
+
+    start_rows = _rows_to_strings(start_rows, column_widths)
+    cutoff_line = f"(..{array.size - rows_to_print.size} hidden rows..)".center(len(header))
+    end_rows = _rows_to_strings(end_rows, column_widths)
+    return header + "\n".join(start_rows) + "\n" + cutoff_line + "\n" + "\n".join(end_rows)
+
+
+def _rows_to_strings(rows: "FancyArray", column_widths: list[tuple[str, int]]) -> list[str]:
+    rows_as_strings = []
+    for row in rows.data:
+        row_as_strings = []
+        for attr, (_, width) in zip(row.tolist(), column_widths):
+            row_as_strings.append(_center_and_truncate(str(attr), width))
+        rows_as_strings.append("|".join(row_as_strings))
+    return rows_as_strings
+
+
+def _determine_column_widths(array: "FancyArray") -> list[tuple[str, int]]:
+    """Get the maximum width of each column in the array."""
+    column_widths: list[tuple[str, int]] = []
+    if not array.dtype.names:
+        return column_widths
+
+    for column in array.dtype.names:
+        data = array.data[column]
+        if data.size:
+            # if float, round to 3 decimals
+            if data.dtype.kind == "f":
+                data = np.around(data, decimals=3)
+            # to string to get the length
+            data = data.astype(str)
+            longest_string = max(data, key=len)
+            if len(column) > len(longest_string):
+                longest_string = column
+        else:
+            longest_string = column
+        column_widths.append((column, len(longest_string) + 2))
+    return column_widths
+
+
+def _center_and_truncate(string: str, width: int) -> str:
+    if len(string) <= width:
+        return string.center(width)
+    return f"{string[:width - 2]}..".center(width)
+
+
+def _get_start_and_end_rows(array: "FancyArray", rows: int) -> tuple["FancyArray", Optional["FancyArray"]]:
+    if array.size <= rows:
+        return array, None
+    cutoff = rows // 2
+    start_rows = array[:cutoff]
+    end_rows = array[-cutoff:]
+    return start_rows, end_rows
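
Not part of the diff above: convert_array_to_string expects a FancyArray (defined in array.py, whose diff is not shown in this section), so a full example is omitted. The sketch below exercises two private helpers directly, purely to illustrate the truncation and row-splitting behaviour; _get_start_and_end_rows only relies on .size and slicing, so a plain ndarray stands in for a FancyArray here.

import numpy as np

from power_grid_model_ds._core.model.arrays.base._string import _center_and_truncate, _get_start_and_end_rows

_center_and_truncate("transformer", 8)   # -> 'transf..' (truncated to fit the column width)
_center_and_truncate("id", 8)            # -> '   id   ' (centered within the column width)

# With 25 rows and rows=10, the first and last 5 rows are kept; the middle rows
# end up behind the "(..N hidden rows..)" line in the rendered table.
start, end = _get_start_and_end_rows(np.arange(25), rows=10)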