power-grid-model-io 1.3.6__py3-none-any.whl → 1.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of power-grid-model-io might be problematic.
- power_grid_model_io/converters/base_converter.py +9 -6
- power_grid_model_io/converters/pandapower_converter.py +155 -117
- power_grid_model_io/converters/pgm_json_converter.py +21 -16
- power_grid_model_io/converters/tabular_converter.py +28 -22
- power_grid_model_io/converters/vision_excel_converter.py +3 -3
- power_grid_model_io/data_stores/excel_file_store.py +5 -5
- power_grid_model_io/data_stores/json_file_store.py +1 -1
- power_grid_model_io/data_types/_data_types.py +2 -2
- power_grid_model_io/data_types/tabular_data.py +7 -7
- power_grid_model_io/functions/filters.py +2 -2
- power_grid_model_io/mappings/tabular_mapping.py +3 -3
- power_grid_model_io/mappings/value_mapping.py +2 -2
- power_grid_model_io/utils/auto_id.py +2 -2
- power_grid_model_io/utils/download.py +4 -4
- power_grid_model_io/utils/uuid_excel_cvtr.py +3 -3
- {power_grid_model_io-1.3.6.dist-info → power_grid_model_io-1.3.7.dist-info}/METADATA +1 -1
- {power_grid_model_io-1.3.6.dist-info → power_grid_model_io-1.3.7.dist-info}/RECORD +20 -20
- {power_grid_model_io-1.3.6.dist-info → power_grid_model_io-1.3.7.dist-info}/WHEEL +0 -0
- {power_grid_model_io-1.3.6.dist-info → power_grid_model_io-1.3.7.dist-info}/licenses/LICENSE +0 -0
- {power_grid_model_io-1.3.6.dist-info → power_grid_model_io-1.3.7.dist-info}/top_level.txt +0 -0
power_grid_model_io/converters/pgm_json_converter.py CHANGED

@@ -8,11 +8,12 @@ Power Grid Model 'Converter': Load and store power grid model data in the native
 import json
 import logging
 import warnings
+from enum import Enum
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional

 import numpy as np
-from power_grid_model import initialize_array
+from power_grid_model import ComponentType, DatasetType, initialize_array
 from power_grid_model.data_types import ComponentList, Dataset, SingleDataset, SinglePythonDataset
 from power_grid_model.utils import json_deserialize, json_serialize

@@ -44,15 +45,15 @@ class PgmJsonConverter(BaseConverter[StructuredData]):

     def __init__(
         self,
-        source_file: Optional[Union[Path, str]] = None,
-        destination_file: Optional[Union[Path, str]] = None,
+        source_file: Optional[Path | str] = None,
+        destination_file: Optional[Path | str] = None,
         log_level: int = logging.INFO,
     ):
         source = JsonFileStore(file_path=Path(source_file)) if source_file else None
         destination = JsonFileStore(file_path=Path(destination_file)) if destination_file else None
         super().__init__(source=source, destination=destination, log_level=log_level)

-    def _parse_data(self, data: StructuredData, data_type: str, extra_info: Optional[ExtraInfo]) -> Dataset:
+    def _parse_data(self, data: StructuredData, data_type: DatasetType, extra_info: Optional[ExtraInfo]) -> Dataset:
         """This function expects Structured data, which can either be a dictionary (single dataset) or a list of
         dictionaries (batch dataset). The structured dataset consists of components + attributes that exist within
         power-grid-model, but can also contain other data. If this data should be saved for later usage an extra_info
@@ -60,7 +61,8 @@ class PgmJsonConverter(BaseConverter[StructuredData]):

         Args:
             data: Structured data, which can either be a dictionary or a list of dictionaries
-            data_type: the data type of the dataset, i.e. "input", "update", "sym_output" or "asym_output"
+            data_type: the data type of the dataset, i.e. DatasetType.input, DatasetType.update,
+                DatasetType.sym_output or DatasetType.asym_output
             extra_info: an optional dictionary where extra component info (that can't be specified in
                 power-grid-model data) can be specified
             data: StructuredData:
@@ -69,7 +71,7 @@ class PgmJsonConverter(BaseConverter[StructuredData]):

         Returns:
             a dictionary containing the components as keys and their corresponding numpy arrays as values: a
-            power-grid-model input or update dataset
+            power-grid-model DatasetType.input or DatasetType.update dataset

         """
         self._log.debug(f"Loading PGM {data_type} data")
@@ -92,13 +94,13 @@ class PgmJsonConverter(BaseConverter[StructuredData]):
         return result

     def _parse_dataset(
-        self, data: SinglePythonDataset, data_type: str, extra_info: Optional[ExtraInfo]
+        self, data: SinglePythonDataset, data_type: DatasetType, extra_info: Optional[ExtraInfo]
     ) -> SingleDataset:
         """This function parses a single Python dataset and returns a power-grid-model input or update dictionary

         Args:
             data: a single Python dataset
-            data_type: the data type of the dataset, i.e. "input" or "update"
+            data_type: the data type of the dataset, i.e. DatasetType.input or DatasetType.update
             extra_info: an optional dictionary where extra component info (that can't be specified in
                 power-grid-model data) can be specified
             data: SinglePythonDataset:
@@ -107,7 +109,7 @@ class PgmJsonConverter(BaseConverter[StructuredData]):

         Returns:
             a dictionary containing the components as keys and their corresponding numpy arrays as values: a
-            power-grid-model input or update dataset
+            power-grid-model DatasetType.input or DatasetType.update dataset

         """
         return {
@@ -119,7 +121,7 @@ class PgmJsonConverter(BaseConverter[StructuredData]):

     @staticmethod
     def _parse_component(
-        objects: ComponentList, component: str, data_type: str, extra_info: Optional[ExtraInfo]
+        objects: ComponentList, component: ComponentType, data_type: DatasetType, extra_info: Optional[ExtraInfo]
     ) -> np.ndarray:
         """This function generates a structured numpy array (power-grid-model native) from a structured dataset

@@ -127,7 +129,7 @@ class PgmJsonConverter(BaseConverter[StructuredData]):
             objects: a list with dictionaries, where each dictionary contains all attributes of a component
             component: the type of component, eg. node, line, etc. Note: it should be a valid power-grid-model
                 component
-            data_type: a string specifying the data type: input/update
+            data_type: a string specifying the data type: DatasetType.input/DatasetType.update
             extra_info: an optional dictionary where extra component info (that can't be specified in
                 power-grid-model data) can be specified
             objects: ComponentList:
@@ -152,13 +154,15 @@ class PgmJsonConverter(BaseConverter[StructuredData]):
                 try:
                     array[i][attribute] = value
                 except ValueError as ex:
-                    raise ValueError(f"Invalid '{attribute}' value for {component} {data_type} data: {ex}") from ex
+                    raise ValueError(
+                        f"Invalid '{attribute}' value for {component.value} {data_type.value} data: {ex}"
+                    ) from ex

             # If an attribute doesn't exist, it is added to the extra_info lookup table
             elif extra_info is not None:
                 obj_id = obj["id"]
                 if not isinstance(obj_id, int):
-                    raise ValueError(f"Invalid 'id' value for {component} {data_type} data")
+                    raise ValueError(f"Invalid 'id' value for {component.value} {data_type.value} data")
                 if obj_id not in extra_info:
                     extra_info[obj_id] = {}
                 extra_info[obj_id][attribute] = value
@@ -215,8 +219,9 @@ class PgmJsonConverter(BaseConverter[StructuredData]):
         is_sparse_batch = isinstance(array, dict) and "indptr" in array and "data" in array
         if is_batch is not None and is_batch != (is_dense_batch or is_sparse_batch):
             raise ValueError(
-                f"Mixed {'' if is_batch else 'non-'}batch data "
-                f"with {'non-' if is_batch else ''}batch data ({component})."
+                f"Mixed {'' if is_batch else 'non-'}batch data with "
+                f"{'non-' if is_batch else ''}batch data ("
+                f"{component.value if isinstance(component, Enum) else component})."
             )
         is_batch = is_dense_batch or is_sparse_batch
         return bool(is_batch)
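The switch from plain strings to the ComponentType and DatasetType enums is the core of these hunks. A minimal sketch of what the new imports enable (assuming a power-grid-model version that ships these enums; the attribute values are made up):

    from power_grid_model import ComponentType, DatasetType, initialize_array

    # The enums replace the former plain strings "input" and "node":
    nodes = initialize_array(DatasetType.input, ComponentType.node, 2)
    nodes["id"] = [1, 2]
    nodes["u_rated"] = [10.5e3, 10.5e3]

    # .value recovers the plain string, which is what the error messages
    # above now use via component.value and data_type.value:
    assert DatasetType.input.value == "input"
    assert ComponentType.node.value == "node"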
power_grid_model_io/converters/tabular_converter.py CHANGED

@@ -7,13 +7,14 @@ Tabular Data Converter: Load data from multiple tables and use a mapping file to

 import inspect
 import logging
+from enum import Enum
 from pathlib import Path
-from typing import Any, Collection, Dict, List, Mapping, Optional, Union, cast
+from typing import Any, Collection, Dict, List, Mapping, Optional, cast

 import numpy as np
 import pandas as pd
 import yaml
-from power_grid_model import initialize_array
+from power_grid_model import DatasetType, initialize_array
 from power_grid_model.data_types import Dataset

 from power_grid_model_io.converters.base_converter import BaseConverter
@@ -89,13 +90,13 @@ class TabularConverter(BaseConverter[TabularData]):
         if "multipliers" in mapping:
             self._multipliers = MultiplierMapping(cast(Multipliers, mapping["multipliers"]), logger=self._log)

-    def _parse_data(self, data: TabularData, data_type: str, extra_info: Optional[ExtraInfo]) -> Dataset:
+    def _parse_data(self, data: TabularData, data_type: DatasetType, extra_info: Optional[ExtraInfo]) -> Dataset:
         """This function parses tabular data and returns power-grid-model data

         Args:
             data: TabularData, i.e. a dictionary with the components as keys and pd.DataFrames as values, with
                 attribute names as columns and their values in the table
-            data_type: power-grid-model data type, i.e. "input" or "update"
+            data_type: power-grid-model data type, i.e. DatasetType.input or DatasetType.update
             extra_info: an optional dictionary where extra component info (that can't be specified in
                 power-grid-model data) can be specified
             data: TabularData:
@@ -145,9 +146,9 @@ class TabularConverter(BaseConverter[TabularData]):
     def _convert_table_to_component(  # pylint: disable = too-many-arguments,too-many-positional-arguments
         self,
         data: TabularData,
-        data_type: str,
+        data_type: str | Enum,
         table: str,
-        component: str,
+        component: str | Enum,
         attributes: InstanceAttributes,
         extra_info: Optional[ExtraInfo],
     ) -> Optional[np.ndarray]:
@@ -157,7 +158,7 @@ class TabularConverter(BaseConverter[TabularData]):

         Args:
             data: The full dataset with tabular data
-            data_type: The data type, i.e. "input" or "update"
+            data_type: The data type, i.e. DatasetType.input or DatasetType.update
             table: The name of the table that should be converter
             component: the component for which a power-grid-model array should be made
             attributes: a dictionary with a mapping from the attribute names in the table to the corresponding
@@ -165,9 +166,9 @@ class TabularConverter(BaseConverter[TabularData]):
             extra_info: an optional dictionary where extra component info (that can't be specified in
                 power-grid-model data) can be specified
             data: TabularData:
-            data_type: str:
+            data_type: str | Enum:
             table: str:
-            component: str:
+            component: str | Enum:
             attributes: InstanceAttributes:
             extra_info: Optional[ExtraInfo]:

@@ -187,13 +188,16 @@ class TabularConverter(BaseConverter[TabularData]):

         n_records = np.sum(table_mask) if table_mask is not None else len(data[table])

+        component_str = component.value if isinstance(component, Enum) else component
+        data_type_str = data_type.value if isinstance(data_type, Enum) else data_type
+
         try:
-            pgm_data = initialize_array(data_type=data_type, component_type=component, shape=n_records)
+            pgm_data = initialize_array(data_type=data_type_str, component_type=component_str, shape=n_records)
         except KeyError as ex:
-            raise KeyError(f"Invalid component type '{component}' or data type '{data_type}'") from ex
+            raise KeyError(f"Invalid component type '{component_str}' or data type '{data_type_str}'") from ex

         if "id" not in attributes:
-            raise KeyError(f"No mapping for the attribute 'id' for '{component}s'!")
+            raise KeyError(f"No mapping for the attribute 'id' for '{component_str}s'!")

         # Make sure that the "id" column is always parsed first (at least before "extra" is parsed)
         attributes_without_filter = {k: v for k, v in attributes.items() if k != "filters"}
@@ -207,7 +211,7 @@ class TabularConverter(BaseConverter[TabularData]):
             data=data,
             pgm_data=pgm_data,
             table=table,
-            component=component,
+            component=component_str,
             attr=attr,
             col_def=col_def,
             table_mask=table_mask,
@@ -232,7 +236,7 @@ class TabularConverter(BaseConverter[TabularData]):
         data: TabularData,
         pgm_data: np.ndarray,
         table: str,
-        component: str,
+        component: str | Enum,
         attr: str,
         col_def: Any,
         table_mask: Optional[np.ndarray],
@@ -254,7 +258,7 @@ class TabularConverter(BaseConverter[TabularData]):
             data: TabularData:
             pgm_data: np.ndarray:
             table: str:
-            component: str:
+            component: str | Enum:
             attr: str:
             col_def: Any:
             extra_info: Optional[ExtraInfo]:
@@ -265,12 +269,15 @@ class TabularConverter(BaseConverter[TabularData]):
         """
         # To avoid mistakes, the attributes in the mapping should exist. There is one extra attribute called
         # 'extra' in which extra information can be captured.
+
+        component_str = component.value if isinstance(component, Enum) else component
+
         if pgm_data.dtype.names is None:
-            raise ValueError(f"pgm_data for '{component}s' has no attributes defined. (dtype.names is None)")
+            raise ValueError(f"pgm_data for '{component_str}s' has no attributes defined. (dtype.names is None)")

         if attr not in pgm_data.dtype.names and attr not in ["extra", "filters"]:
             attrs = ", ".join(pgm_data.dtype.names)
-            raise KeyError(f"Could not find attribute '{attr}' for '{component}s'. (choose from: {attrs})")
+            raise KeyError(f"Could not find attribute '{attr}' for '{component_str}s'. (choose from: {attrs})")

         if attr == "extra":
             # Extra info must be linked to the object IDs, therefore the uuids should be known before extra info can
@@ -421,7 +428,7 @@ class TabularConverter(BaseConverter[TabularData]):
     def _parse_col_def_const(
         data: TabularData,
         table: str,
-        col_def: Union[int, float],
+        col_def: int | float,
         table_mask: Optional[np.ndarray] = None,
     ) -> pd.DataFrame:
         """Create a single column pandas DataFrame containing the const value.
@@ -429,8 +436,7 @@ class TabularConverter(BaseConverter[TabularData]):
         Args:
             data: TabularData:
             table: str:
-            col_def: Union[int,
-                float]:
+            col_def: int | float:

         Returns:

@@ -602,7 +608,7 @@ class TabularConverter(BaseConverter[TabularData]):
         table: str,
         ref_table: Optional[str],
         ref_name: Optional[str],
-        key_col_def: Union[str, List[str], Dict[str, str]],
+        key_col_def: str | List[str] | Dict[str, str],
         table_mask: Optional[np.ndarray],
         extra_info: Optional[ExtraInfo],
     ) -> pd.DataFrame:
@@ -845,7 +851,7 @@ class TabularConverter(BaseConverter[TabularData]):

         return keys.apply(get_id, axis=1).to_list()

-    def lookup_id(self, pgm_id: int) -> Dict[str, Union[str, Dict[str, int]]]:
+    def lookup_id(self, pgm_id: int) -> Dict[str, str | Dict[str, int]]:
         """
         Retrieve the original name / key combination of a pgm object
         Args:
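The recurring `x.value if isinstance(x, Enum) else x` expression is the bridge between the new enum-typed API and the string keys used internally. A standalone sketch of that coercion (the helper name is ours, not the library's):

    from enum import Enum

    from power_grid_model import ComponentType, DatasetType

    def as_str(value: str | Enum) -> str:
        # Enum members expose their plain-string value; strings pass through.
        return value.value if isinstance(value, Enum) else value

    assert as_str(ComponentType.node) == as_str("node") == "node"
    assert as_str(DatasetType.update) == "update"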
power_grid_model_io/converters/vision_excel_converter.py CHANGED

@@ -8,7 +8,7 @@ Vision Excel Converter: Load data from a Vision Excel export file and use a mapp
 import logging
 from dataclasses import dataclass
 from pathlib import Path
-from typing import Any, Mapping, Optional, Union
+from typing import Any, Mapping, Optional

 from power_grid_model_io.converters.tabular_converter import TabularConverter
 from power_grid_model_io.data_stores.base_data_store import LANGUAGE_EN
@@ -37,10 +37,10 @@ class VisionExcelConverter(TabularConverter):

     def __init__(  # pylint: disable=too-many-arguments,too-many-positional-arguments
         self,
-        source_file: Optional[Union[Path, str]] = None,
+        source_file: Optional[Path | str] = None,
         language: str = LANGUAGE_EN,
         terms_changed: Optional[dict] = None,
-        mapping_file: Optional[Union[Path, str]] = None,
+        mapping_file: Optional[Path | str] = None,
         log_level: int = logging.INFO,
     ):
         _mapping_file = Path(
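With the `Path | str` union, the converter can be constructed with either type; an illustrative call (the file name is a placeholder):

    from pathlib import Path

    from power_grid_model_io.converters import VisionExcelConverter

    converter = VisionExcelConverter(source_file=Path("vision_export.xlsx"))
    converter = VisionExcelConverter(source_file="vision_export.xlsx")  # equivalent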
power_grid_model_io/data_stores/excel_file_store.py CHANGED

@@ -7,7 +7,7 @@ Excel File Store

 import re
 from pathlib import Path
-from typing import Dict, List, Optional, Set, Tuple, Union
+from typing import Dict, List, Optional, Set, Tuple

 import pandas as pd

@@ -185,10 +185,10 @@ class ExcelFileStore(BaseDataStore[TabularData]):

         return data

-    def _check_duplicate_values(self, sheet_name: str, data: pd.DataFrame) -> Dict[int, Union[str, Tuple[str, ...]]]:
+    def _check_duplicate_values(self, sheet_name: str, data: pd.DataFrame) -> Dict[int, str | Tuple[str, ...]]:
         grouped = self._group_columns_by_index(data=data)

-        to_rename: Dict[int, Union[str, Tuple[str, ...]]] = {}
+        to_rename: Dict[int, str | Tuple[str, ...]] = {}

         for col_name, col_idxs in grouped.items():
             # No duplicate column names
@@ -251,8 +251,8 @@ class ExcelFileStore(BaseDataStore[TabularData]):
         return data

     @staticmethod
-    def _group_columns_by_index(data: pd.DataFrame) -> Dict[Union[str, Tuple[str, ...]], Set[int]]:
-        grouped: Dict[Union[str, Tuple[str, ...]], Set[int]] = {}
+    def _group_columns_by_index(data: pd.DataFrame) -> Dict[str | Tuple[str, ...], Set[int]]:
+        grouped: Dict[str | Tuple[str, ...], Set[int]] = {}
         columns = data.columns.values
         for col_idx, col_name in enumerate(columns):
             if col_name[0] not in grouped:
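A self-contained sketch of the column-grouping idea behind `_group_columns_by_index` (our own simplified version, not the library code): map each column name, possibly a tuple for multi-level headers, to the set of positions where it occurs, so duplicates can be detected and renamed.

    from typing import Dict, Set, Tuple

    import pandas as pd

    def group_columns(data: pd.DataFrame) -> Dict[str | Tuple[str, ...], Set[int]]:
        grouped: Dict[str | Tuple[str, ...], Set[int]] = {}
        for col_idx, col_name in enumerate(data.columns.values):
            grouped.setdefault(col_name, set()).add(col_idx)
        return grouped

    df = pd.DataFrame([[1, 2, 3]], columns=["id", "id", "name"])
    assert group_columns(df) == {"id": {0, 1}, "name": {2}}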
power_grid_model_io/data_stores/json_file_store.py CHANGED

@@ -105,5 +105,5 @@ class JsonFileStore(BaseDataStore[StructuredData]):
         if len(type_names) == 1:
             type_str = type_names.pop()
         else:
-            type_str = "
+            type_str = " | ".join(type_names)
         raise TypeError(f"Invalid data type for {type(self).__name__}: List[{type_str}]")
power_grid_model_io/data_types/_data_types.py CHANGED

@@ -5,7 +5,7 @@
 Common data types used in the Power Grid Model project
 """

-from typing import Any, Dict, List, Union
+from typing import Any, Dict, List

 ExtraInfo = Dict[int, Any]
 """
@@ -36,7 +36,7 @@ ExtraInfoLookup = ExtraInfo
 Legacy type name; use ExtraInfo instead!
 """

-StructuredData = Union[Dict[str, List[Dict[str, Any]]], List[Dict[str, List[Dict[str, Any]]]]]
+StructuredData = Dict[str, List[Dict[str, Any]]] | List[Dict[str, List[Dict[str, Any]]]]
 """
 Structured data is a multi dimensional structure (component_type -> objects -> attribute -> value) or a list of those
 dictionaries:
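Spelled out, the two shapes that StructuredData admits (component and attribute names follow power-grid-model conventions; the values are made up):

    # Single dataset: component_type -> objects -> attribute -> value
    single = {
        "node": [
            {"id": 1, "u_rated": 10.5e3},
            {"id": 2, "u_rated": 10.5e3},
        ],
    }
    # Batch dataset: a list of such dictionaries
    batch = [single, single]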
power_grid_model_io/data_types/tabular_data.py CHANGED

@@ -2,12 +2,12 @@
 #
 # SPDX-License-Identifier: MPL-2.0
 """
-The TabularData class is a wrapper around Dict[str, Union[pd.DataFrame, np.ndarray]],
+The TabularData class is a wrapper around Dict[str, pd.DataFrame | np.ndarray],
 which supports unit conversions and value substitutions
 """

 import logging
-from typing import Callable, Dict, Generator, Iterable, Optional, Tuple, Union
+from typing import Callable, Dict, Generator, Iterable, Optional, Tuple

 import numpy as np
 import pandas as pd
@@ -21,11 +21,11 @@ LazyDataFrame = Callable[[], pd.DataFrame]

 class TabularData:
     """
-    The TabularData class is a wrapper around Dict[str, Union[pd.DataFrame, np.ndarray]],
+    The TabularData class is a wrapper around Dict[str, pd.DataFrame | np.ndarray],
     which supports unit conversions and value substitutions
     """

-    def __init__(self, logger=None, **tables: Union[pd.DataFrame, np.ndarray, LazyDataFrame]):
+    def __init__(self, logger=None, **tables: pd.DataFrame | np.ndarray | LazyDataFrame):
         """
         Tabular data can either be a collection of pandas DataFrames and/or numpy structured arrays.
         The key word arguments will define the keys of the data.
@@ -48,7 +48,7 @@ class TabularData:
                 f"Invalid data type for table '{table_name}'; "
                 f"expected a pandas DataFrame or NumPy array, got {type(table_data).__name__}."
             )
-        self._data: Dict[str, Union[pd.DataFrame, np.ndarray, LazyDataFrame]] = tables
+        self._data: Dict[str, pd.DataFrame | np.ndarray | LazyDataFrame] = tables
         self._units: Optional[UnitMapping] = None
         self._substitution: Optional[ValueMapping] = None

@@ -181,7 +181,7 @@ class TabularData:
         """
         return table_name in self._data

-    def __getitem__(self, table_name: str) -> Union[pd.DataFrame, np.ndarray]:
+    def __getitem__(self, table_name: str) -> pd.DataFrame | np.ndarray:
         """
         Mimic the dictionary [] operator. It returns the 'raw' table data as stored in memory. This can be either a
         pandas DataFrame or a numpy structured array. It is possible that some unit conversions have been applied by
@@ -206,7 +206,7 @@ class TabularData:

         return self._data.keys()

-    def items(self) -> Generator[Tuple[str, Union[pd.DataFrame, np.ndarray]], None, None]:
+    def items(self) -> Generator[Tuple[str, pd.DataFrame | np.ndarray], None, None]:
         """
         Mimic the dictionary .items() function

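A minimal usage sketch of the wrapper (table and column names are made up): TabularData accepts pandas DataFrames and/or numpy structured arrays as keyword arguments and exposes a dict-like interface.

    import numpy as np
    import pandas as pd

    from power_grid_model_io.data_types import TabularData

    nodes = pd.DataFrame({"id": [1, 2], "u_rated": [10.5e3, 10.5e3]})
    lines = np.array([(3, 1, 2)], dtype=[("id", "i8"), ("from_node", "i8"), ("to_node", "i8")])
    data = TabularData(nodes=nodes, lines=lines)

    assert "nodes" in data                  # __contains__
    for table_name, table in data.items():  # mimics dict.items()
        print(table_name, len(table))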
power_grid_model_io/functions/filters.py CHANGED

@@ -5,7 +5,7 @@
 These functions can be used in the mapping files to apply filter functions to vision data
 """

-from typing import List, Union
+from typing import List

 import pandas as pd

@@ -24,7 +24,7 @@ def exclude_empty(row: pd.Series, col: str) -> bool:
     return result


-def exclude_value(row: pd.Series, col: str, value: Union[float, str]) -> bool:
+def exclude_value(row: pd.Series, col: str, value: float | str) -> bool:
     """
     filter out by match value
     """
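Illustrative use of exclude_value as a row filter (the column name and value are made up; per its docstring, a row whose value matches is the one filtered out):

    import pandas as pd

    from power_grid_model_io.functions.filters import exclude_value

    row = pd.Series({"Name": "cable-1", "Switch state": 0.0})
    keep = exclude_value(row, col="Switch state", value=0.0)
    # expected False here: the value matches, so the row is excluded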
power_grid_model_io/mappings/tabular_mapping.py CHANGED

@@ -5,13 +5,13 @@
 Tabular data mapping helper class
 """

-from typing import Dict, Generator, List, Tuple, Union
+from typing import Dict, Generator, List, Tuple

 import structlog

-AttributeValue = Union[int, float, str, Dict, List]
+AttributeValue = int | float | str | Dict | List
 InstanceAttributes = Dict[str, AttributeValue]
-Components = Dict[str, Union[InstanceAttributes, List[InstanceAttributes]]]
+Components = Dict[str, InstanceAttributes | List[InstanceAttributes]]
 Tables = Dict[str, Components]


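A toy value satisfying these aliases (table, component and attribute names are illustrative):

    # Tables: table name -> Components; Components: component name -> one or
    # more InstanceAttributes; AttributeValue: int | float | str | Dict | List
    tables = {
        "Nodes": {
            "node": {
                "id": "Number",
                "u_rated": "Unom",
            },
        },
    }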
power_grid_model_io/mappings/value_mapping.py CHANGED

@@ -5,13 +5,13 @@
 Value substitution helper class
 """

-from typing import Dict, Optional, Union
+from typing import Dict, Optional

 import structlog

 from power_grid_model_io.mappings.field_mapping import FieldMapping

-Value = Union[int, float, str, bool]
+Value = int | float | str | bool

 # attr     key    value
 Values = Dict[str, Dict[Value, Value]]
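A toy substitution table matching Values = Dict[str, Dict[Value, Value]] (the attribute name and values are illustrative):

    # attr             key    value
    values = {
        "switch_state": {"on": 1, "off": 0},
    }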
power_grid_model_io/utils/auto_id.py CHANGED

@@ -7,7 +7,7 @@ Automatic ID generator class

 import collections
 from collections.abc import Hashable
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional


 class AutoID:
@@ -94,7 +94,7 @@ class AutoID:
         # Return the numeric id
         return idx

-    def __contains__(self, item: Union[int, Hashable]) -> bool:
+    def __contains__(self, item: int | Hashable) -> bool:
         """
         Check if the id, or the item exists

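A self-contained sketch of the auto-ID idea, with a `__contains__` that accepts either an already-assigned numeric id or the original item (our own mini version, not the library class):

    from collections.abc import Hashable

    class MiniAutoID:
        def __init__(self) -> None:
            self._items: list = []    # index == numeric id
            self._indices: dict = {}  # item -> numeric id

        def add(self, item: Hashable) -> int:
            if item not in self._indices:
                self._indices[item] = len(self._items)
                self._items.append(item)
            return self._indices[item]  # return the numeric id

        def __contains__(self, item: int | Hashable) -> bool:
            if isinstance(item, int):  # treat ints as candidate ids
                return 0 <= item < len(self._items)
            return item in self._indices

    ids = MiniAutoID()
    assert ids.add(("node", 101)) == 0
    assert 0 in ids and ("node", 101) in ids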
power_grid_model_io/utils/download.py CHANGED

@@ -29,7 +29,7 @@ import tempfile
 from dataclasses import dataclass
 from pathlib import Path
 from shutil import rmtree as remove_dir
-from typing import Optional, Union
+from typing import Optional
 from urllib import request

 import structlog
@@ -81,7 +81,7 @@ class DownloadProgressHook:  # pylint: disable=too-few-public-methods


 def download_and_extract(
-    url: str, dir_path: Optional[Path] = None, file_name: Optional[Union[str, Path]] = None, overwrite: bool = False
+    url: str, dir_path: Optional[Path] = None, file_name: Optional[str | Path] = None, overwrite: bool = False
 ) -> Path:
     """
     Download a file from a URL and store it locally, extract the contents and return the path to the contents.
@@ -112,7 +112,7 @@ def download_and_extract(


 def download(
-    url: str, file_name: Optional[Union[str, Path]] = None, dir_path: Optional[Path] = None, overwrite: bool = False
+    url: str, file_name: Optional[str | Path] = None, dir_path: Optional[Path] = None, overwrite: bool = False
 ) -> Path:
     """
     Download a file from a URL and store it locally
@@ -200,7 +200,7 @@ def get_response_info(url: str) -> ResponseInfo:

 def get_download_path(
     dir_path: Optional[Path] = None,
-    file_name: Optional[Union[str, Path]] = None,
+    file_name: Optional[str | Path] = None,
     unique_key: Optional[str] = None,
 ) -> Path:
     """
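A hedged usage sketch: the signatures come straight from the hunks above, but the URL and directory are placeholders.

    from pathlib import Path

    from power_grid_model_io.utils.download import download_and_extract

    extracted = download_and_extract(
        url="https://example.com/grid-data.zip",  # placeholder URL
        dir_path=Path("downloads"),
        overwrite=False,
    )  # returns the Path to the extracted contents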
power_grid_model_io/utils/uuid_excel_cvtr.py CHANGED

@@ -16,7 +16,7 @@ nieuw_bestand = convert_guid_vision_excel("vision_97_nl.xlsx", number="Nummer",
 import os
 import re
 from pathlib import Path
-from typing import Optional, Union
+from typing import Optional

 import pandas as pd

@@ -120,7 +120,7 @@ class UUID2IntCvtr:
         return self._counter


-def load_excel_file(file_name: Union[Path, str]) -> pd.ExcelFile:
+def load_excel_file(file_name: Path | str) -> pd.ExcelFile:
     """Load an excel file

     Args:
@@ -219,7 +219,7 @@ def save_df_to_excel(df: pd.DataFrame, file_name: str, sheet_name: str, i: int)


 def convert_guid_vision_excel(
-    excel_file: Union[Path, str],
+    excel_file: Path | str,
     number: str = VISION_EXCEL_LAN_DICT[LANGUAGE_EN][DICT_KEY_NUMBER],
     terms_changed: Optional[dict] = None,
 ) -> str:
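The module docstring (visible in the first hunk header) already demonstrates the intended call; restated here (vision_97_nl.xlsx is the docstring's own example file):

    from power_grid_model_io.utils.uuid_excel_cvtr import convert_guid_vision_excel

    new_file = convert_guid_vision_excel("vision_97_nl.xlsx", number="Nummer")
    # returns the file name of the converted workbook as a str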
{power_grid_model_io-1.3.6.dist-info → power_grid_model_io-1.3.7.dist-info}/RECORD CHANGED

@@ -9,42 +9,42 @@ power_grid_model_io/config/excel/vision_en_9_7.yaml,sha256=ts_qcrO3Gd8tQwPFzGWEb
 power_grid_model_io/config/excel/vision_en_9_8.yaml,sha256=yCa4v5VUK1xCZ0PrHG2ZflpIZN-D7dNsS7PKYW8K9Ac,20938
 power_grid_model_io/config/excel/vision_nl.yaml,sha256=0IOh5Ug9QwP2dgNMsl90RARV_v-xxcA6rv8Ya0WCPuI,21426
 power_grid_model_io/converters/__init__.py,sha256=qng1-O8kfpgalPms_C_zAwBuiuKUoquf27u6JkVH-9k,487
-power_grid_model_io/converters/base_converter.py,sha256=
-power_grid_model_io/converters/pandapower_converter.py,sha256=
-power_grid_model_io/converters/pgm_json_converter.py,sha256=
-power_grid_model_io/converters/tabular_converter.py,sha256=
-power_grid_model_io/converters/vision_excel_converter.py,sha256=
+power_grid_model_io/converters/base_converter.py,sha256=ifF-U1kmJIW9Tj9uW7d0XJlsBrmvpUZoi3WxNPAbKO4,6170
+power_grid_model_io/converters/pandapower_converter.py,sha256=ccD1zBpY2UiKealTF3f1nnwraNKeGdwNzk_cMN6RE2Y,117805
+power_grid_model_io/converters/pgm_json_converter.py,sha256=7Z1Qmoxn0qo4fkLXvmNTWNMHOJHhnyGTvxlRlcHbMLg,14210
+power_grid_model_io/converters/tabular_converter.py,sha256=XmUDVthYyJQiHXxzspVCjUo3ZGsv4XXwZ_VRQLn4bvs,35666
+power_grid_model_io/converters/vision_excel_converter.py,sha256=hiEOqXgfM3XbHDJ-ri-Os2a4Dh5lZFqF0m9_YFBaYs4,4166
 power_grid_model_io/data_stores/__init__.py,sha256=qwbj1j-Aa_yRB-E3j35pEVtF3mgH8CVIXAnog5mOry0,138
 power_grid_model_io/data_stores/base_data_store.py,sha256=DJfLtRwvx_tXKnpjtBdfbMqPjWc324Eo5WeKTXjWXqc,1706
 power_grid_model_io/data_stores/csv_dir_store.py,sha256=H8ICXZRLDvp9OkbjkfHnoh4y7uNSXNepHAW6W53VsIw,1877
-power_grid_model_io/data_stores/excel_file_store.py,sha256=
-power_grid_model_io/data_stores/json_file_store.py,sha256=
+power_grid_model_io/data_stores/excel_file_store.py,sha256=W1axhUGd7l42phuPvZLU8GWyUr2SlWJEdljHD0sNYeY,10837
+power_grid_model_io/data_stores/json_file_store.py,sha256=eMEZLJa853_aUxwPmXEF4L8C_XzlB-glgxB5iE-UnUI,3938
 power_grid_model_io/data_stores/vision_excel_file_store.py,sha256=QJjT6lfqfRG4Zt8Lsm81hNwjZa5Z_6dndUfwUIyPN3Q,1060
 power_grid_model_io/data_types/__init__.py,sha256=cTHmwEVWQsjogGzMUhZ5wb79l3CaFyqa1If0_XCL65w,477
-power_grid_model_io/data_types/_data_types.py,sha256=
-power_grid_model_io/data_types/tabular_data.py,sha256=
+power_grid_model_io/data_types/_data_types.py,sha256=8kKDdYTF9BZL1BscnIEUHaz1d6B1nJipXweSqUs55RY,1651
+power_grid_model_io/data_types/tabular_data.py,sha256=3sLF3CAxwbA8GCxKf-MDMETBUakhVIZOVmaF5adIENk,8541
 power_grid_model_io/functions/__init__.py,sha256=l1PjXVh21UTlWmv3j_KvflEymTUcGNxm8BDDpt1jUYc,734
 power_grid_model_io/functions/_functions.py,sha256=tqwwZ0G8AeDza0IiS6CSMwKB0lV1hDo2D8e9-ARHXQM,2843
-power_grid_model_io/functions/filters.py,sha256=
+power_grid_model_io/functions/filters.py,sha256=yF24k64r5FDFVSSgYGMpRq-JmrM6pfeTekkLeXXtnB8,1385
 power_grid_model_io/functions/phase_to_phase.py,sha256=Cufj3lcUESKa_AFHn27GsUMxjTFmF5mj0-sdFrE7V00,4495
 power_grid_model_io/mappings/__init__.py,sha256=qwbj1j-Aa_yRB-E3j35pEVtF3mgH8CVIXAnog5mOry0,138
 power_grid_model_io/mappings/field_mapping.py,sha256=YfrwKolNG06kIC1sbUYnYmxuOrbNbNo1dYtnF8rNItw,1659
 power_grid_model_io/mappings/multiplier_mapping.py,sha256=mQ112SMbuIgCZ1haMOxMtfbn2kMsaMaYSfEGv73gSGQ,910
-power_grid_model_io/mappings/tabular_mapping.py,sha256=
+power_grid_model_io/mappings/tabular_mapping.py,sha256=HH_O1wbuLmfdq-Wnbhb7-2rFq1BaMrHSs8T6aRHNwDc,1815
 power_grid_model_io/mappings/unit_mapping.py,sha256=Z6DGp5Z7f0kLbcU9oih466at1OHAGzWdYeSZF9DGpnI,2933
-power_grid_model_io/mappings/value_mapping.py,sha256=
+power_grid_model_io/mappings/value_mapping.py,sha256=BnO4WWFVPivnT-8FFbcv-l1R8P5OIaUA7reINK8ZOAs,1236
 power_grid_model_io/utils/__init__.py,sha256=qwbj1j-Aa_yRB-E3j35pEVtF3mgH8CVIXAnog5mOry0,138
-power_grid_model_io/utils/auto_id.py,sha256=
+power_grid_model_io/utils/auto_id.py,sha256=eOHDAjmV-xM2RLeWVYEjVa85_dfUBIgX_dVq8SV7BMQ,4110
 power_grid_model_io/utils/dict.py,sha256=HTepnXhEnyBDfDrDm21c5DIlMEO1o5ShMSSnUGm2rDE,1015
-power_grid_model_io/utils/download.py,sha256=
+power_grid_model_io/utils/download.py,sha256=TGExH7CRptiFGoqnhz2aJKvScJ9EzD4oDvbc1LvcIzo,9432
 power_grid_model_io/utils/excel_ambiguity_checker.py,sha256=11hkzMuJHMpTIM7uf1AH3Wcs9Q3hfjEmXORIk1IfE_s,6923
 power_grid_model_io/utils/json.py,sha256=dQDRd2Vb8pfqLU2hTuWYv2cpSIBBbFhd0LOBP21YxJI,3327
 power_grid_model_io/utils/modules.py,sha256=DJLmYKt9cV_GvqJI8wkXppNycqD4b8n5-o_87XXfQbc,929
 power_grid_model_io/utils/parsing.py,sha256=cw6d3S89BvB8dncN0SeFHDhFG7ZlDNx9iGYWjZk5fVU,4684
-power_grid_model_io/utils/uuid_excel_cvtr.py,sha256=
+power_grid_model_io/utils/uuid_excel_cvtr.py,sha256=FFsfnvELVTkIXE_qfEPjFlphsikAr7GyjnNwsDZ-AgY,7581
 power_grid_model_io/utils/zip.py,sha256=VXHX4xWPPZbhOlZUAbMDy3MgQFzK6_l7sRvGXihNUY4,3875
-power_grid_model_io-1.3.6.dist-info/licenses/LICENSE,sha256=
-power_grid_model_io-1.3.6.dist-info/METADATA,sha256=
-power_grid_model_io-1.3.6.dist-info/WHEEL,sha256=
-power_grid_model_io-1.3.6.dist-info/top_level.txt,sha256=
-power_grid_model_io-1.3.6.dist-info/RECORD,,
+power_grid_model_io-1.3.7.dist-info/licenses/LICENSE,sha256=7Pm2fWFFHHUG5lDHed1vl5CjzxObIXQglnYsEdtjo_k,14907
+power_grid_model_io-1.3.7.dist-info/METADATA,sha256=3qhKu0mzHLs7yugF6QwwsTNP6ReWs4oZuZMbhxGy4NA,8195
+power_grid_model_io-1.3.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+power_grid_model_io-1.3.7.dist-info/top_level.txt,sha256=7sq9VveemMm2R0RgTBa4tH8y_xF4_1hxbufmX9OjCTo,20
+power_grid_model_io-1.3.7.dist-info/RECORD,,
{power_grid_model_io-1.3.6.dist-info → power_grid_model_io-1.3.7.dist-info}/WHEEL RENAMED
File without changes

{power_grid_model_io-1.3.6.dist-info → power_grid_model_io-1.3.7.dist-info}/licenses/LICENSE RENAMED
File without changes

{power_grid_model_io-1.3.6.dist-info → power_grid_model_io-1.3.7.dist-info}/top_level.txt RENAMED
File without changes