power-grid-model-io 1.2.85__py3-none-any.whl → 1.2.87__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: the registry flags this version of power-grid-model-io as potentially problematic.
- power_grid_model_io/config/excel/vision_en.yaml +14 -0
- power_grid_model_io/config/excel/vision_en_9_7.yaml +14 -0
- power_grid_model_io/config/excel/vision_nl.yaml +14 -0
- power_grid_model_io/converters/tabular_converter.py +109 -30
- power_grid_model_io/functions/filters.py +40 -0
- {power_grid_model_io-1.2.85.dist-info → power_grid_model_io-1.2.87.dist-info}/METADATA +1 -1
- {power_grid_model_io-1.2.85.dist-info → power_grid_model_io-1.2.87.dist-info}/RECORD +10 -9
- {power_grid_model_io-1.2.85.dist-info → power_grid_model_io-1.2.87.dist-info}/WHEEL +1 -1
- {power_grid_model_io-1.2.85.dist-info → power_grid_model_io-1.2.87.dist-info}/LICENSE +0 -0
- {power_grid_model_io-1.2.85.dist-info → power_grid_model_io-1.2.87.dist-info}/top_level.txt +0 -0
power_grid_model_io/config/excel/vision_en.yaml

@@ -424,6 +424,11 @@ grid:
       extra:
         - ID
         - Name
+      filters:
+        - power_grid_model_io.functions.filters.exclude_all_columns_empty_or_zero:
+            cols:
+              - Load.P
+              - Load.Q
     sym_gen:
       - id:
           auto_id:
@@ -444,6 +449,11 @@ grid:
         extra:
           - ID
           - Name
+        filters:
+          - power_grid_model_io.functions.filters.exclude_all_columns_empty_or_zero:
+              cols:
+                - Generation.P
+                - Generation.Q
       - id:
           auto_id:
             name: pv_generation
@@ -466,6 +476,10 @@ grid:
         extra:
           - ID
           - Name
+        filters:
+          - power_grid_model_io.functions.filters.exclude_all_columns_empty_or_zero:
+              cols:
+                - PV.Pnom
   Sources:
     source:
       id:
power_grid_model_io/config/excel/vision_en_9_7.yaml

@@ -425,6 +425,11 @@ grid:
       extra:
         - ID
         - Name
+      filters:
+        - power_grid_model_io.functions.filters.exclude_all_columns_empty_or_zero:
+            cols:
+              - Load.P
+              - Load.Q
     sym_gen:
       - id:
           auto_id:
@@ -445,6 +450,11 @@ grid:
         extra:
           - ID
           - Name
+        filters:
+          - power_grid_model_io.functions.filters.exclude_all_columns_empty_or_zero:
+              cols:
+                - Generation.P
+                - Generation.Q
       - id:
           auto_id:
             name: pv_generation
@@ -467,6 +477,10 @@ grid:
         extra:
          - ID
          - Name
+        filters:
+          - power_grid_model_io.functions.filters.exclude_all_columns_empty_or_zero:
+              cols:
+                - PV.Pnom
   Sources:
     source:
       id:
power_grid_model_io/config/excel/vision_nl.yaml

@@ -415,6 +415,11 @@ grid:
       extra:
         - ID
         - Naam
+      filters:
+        - power_grid_model_io.functions.filters.exclude_all_columns_empty_or_zero:
+            cols:
+              - Belasting.P
+              - Belasting.Q
     sym_gen:
       - id:
           auto_id:
@@ -435,6 +440,11 @@ grid:
         extra:
           - ID
           - Naam
+        filters:
+          - power_grid_model_io.functions.filters.exclude_all_columns_empty_or_zero:
+              cols:
+                - Opwekking.P
+                - Opwekking.Q
       - id:
           auto_id:
             name: pv_generation
@@ -457,6 +467,10 @@ grid:
         extra:
           - ID
           - Naam
+        filters:
+          - power_grid_model_io.functions.filters.exclude_all_columns_empty_or_zero:
+              cols:
+                - PV.Pnom
   Netvoedingen:
     source:
       id:
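All three mapping files gain the same `filters` block (with Dutch column names in vision_nl.yaml): each sheet-level entry names a filter function plus its keyword arguments, and only rows for which the function returns True are converted. A minimal sketch of that effect, assuming a small pandas DataFrame standing in for the Vision "Loads" sheet (the rows and values below are illustrative, not taken from a real export):

```python
import pandas as pd

from power_grid_model_io.functions.filters import exclude_all_columns_empty_or_zero

# Illustrative stand-in for a Vision "Loads" sheet; real sheets carry many more columns.
loads = pd.DataFrame(
    {
        "ID": [1, 2, 3],
        "Load.P": [0.0, 5.0, float("nan")],
        "Load.Q": [0.0, 1.0, float("nan")],
    }
)

# The new `filters:` entry keeps only rows for which the mapped function returns True,
# i.e. rows where at least one of the listed columns is neither empty nor zero.
mask = loads.apply(exclude_all_columns_empty_or_zero, axis=1, cols=["Load.P", "Load.Q"])
print(loads[mask])  # only the row with Load.P=5.0, Load.Q=1.0 survives
```

The same pattern applies to the generation and PV sheets, with Generation.P/Generation.Q and PV.Pnom as the watched columns.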
power_grid_model_io/converters/tabular_converter.py

@@ -178,7 +178,12 @@ class TabularConverter(BaseConverter[TabularData]):
         if table not in data:
             return None
 
-        n_records = len(data[table])
+        if "filters" in attributes:
+            table_mask = self._parse_table_filters(data=data, table=table, filtering_functions=attributes["filters"])
+        else:
+            table_mask = None
+
+        n_records = np.sum(table_mask) if table_mask is not None else len(data[table])
 
         try:
             pgm_data = initialize_array(data_type=data_type, component_type=component, shape=n_records)
@@ -189,7 +194,8 @@ class TabularConverter(BaseConverter[TabularData]):
             raise KeyError(f"No mapping for the attribute 'id' for '{component}s'!")
 
         # Make sure that the "id" column is always parsed first (at least before "extra" is parsed)
-        sorted_attributes = sorted(attributes.items(), key=lambda x: "" if x[0] == "id" else x[0])
+        attributes_without_filter = {k: v for k, v in attributes.items() if k != "filters"}
+        sorted_attributes = sorted(attributes_without_filter.items(), key=lambda x: "" if x[0] == "id" else x[0])
 
         for attr, col_def in sorted_attributes:
             self._convert_col_def_to_attribute(
@@ -199,11 +205,23 @@ class TabularConverter(BaseConverter[TabularData]):
                 component=component,
                 attr=attr,
                 col_def=col_def,
+                table_mask=table_mask,
                 extra_info=extra_info,
             )
 
         return pgm_data
 
+    def _parse_table_filters(self, data: TabularData, table: str, filtering_functions: Any) -> Optional[np.ndarray]:
+        if not isinstance(data[table], pd.DataFrame):
+            return None
+
+        table_mask = np.ones(len(data[table]), dtype=bool)
+        for filtering_fn in filtering_functions:
+            for fn_name, kwargs in filtering_fn.items():
+                fn_ptr = get_function(fn_name)
+                table_mask &= data[table].apply(fn_ptr, axis=1, **kwargs).values
+        return table_mask
+
     # pylint: disable = too-many-arguments
     def _convert_col_def_to_attribute(
         self,
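In other words, the converter now computes one boolean row mask per table before any attribute is parsed, and sizes the component array to the surviving rows. A condensed re-implementation of that masking logic, for illustration only (the name `build_table_mask` is hypothetical; the real method resolves the dotted function name via the package's `get_function` helper, as shown in the hunk above):

```python
import numpy as np
import pandas as pd


def build_table_mask(table: pd.DataFrame, filtering_functions: list) -> np.ndarray:
    """AND-combine all configured row filters into a single boolean mask."""
    mask = np.ones(len(table), dtype=bool)
    for entry in filtering_functions:     # e.g. [{exclude_all_columns_empty_or_zero: {"cols": [...]}}]
        for fn, kwargs in entry.items():  # here `fn` is already a callable; the converter looks it up by name
            mask &= table.apply(fn, axis=1, **kwargs).values
    return mask
```

Note that multiple entries under `filters:` are combined with a logical AND, while `exclude_all_columns_empty_or_zero` itself ORs over its `cols` list; tables that are not pandas DataFrames skip filtering entirely (`table_mask` stays `None`).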
@@ -213,6 +231,7 @@ class TabularConverter(BaseConverter[TabularData]):
         component: str,
         attr: str,
         col_def: Any,
+        table_mask: Optional[np.ndarray],
         extra_info: Optional[ExtraInfo],
     ):
         """This function updates one of the attributes of pgm_data, based on the corresponding table/column in a tabular
@@ -242,7 +261,7 @@ class TabularConverter(BaseConverter[TabularData]):
         """
         # To avoid mistakes, the attributes in the mapping should exist. There is one extra attribute called
         # 'extra' in which extra information can be captured.
-        if attr not in pgm_data.dtype.names and attr != "extra":
+        if attr not in pgm_data.dtype.names and attr not in ["extra", "filters"]:
             attrs = ", ".join(pgm_data.dtype.names)
             raise KeyError(f"Could not find attribute '{attr}' for '{component}s'. (choose from: {attrs})")
 
@@ -250,12 +269,19 @@ class TabularConverter(BaseConverter[TabularData]):
             # Extra info must be linked to the object IDs, therefore the uuids should be known before extra info can
             # be parsed. Before this for loop, it is checked that "id" exists and it is placed at the front.
             self._handle_extra_info(
-                data=data, table=table, col_def=col_def, uuids=pgm_data["id"], extra_info=extra_info
+                data=data,
+                table=table,
+                col_def=col_def,
+                uuids=pgm_data["id"],
+                table_mask=table_mask,
+                extra_info=extra_info,
             )
             # Extra info should not be added to the numpy arrays, so let's continue to the next attribute
             return
 
-        attr_data = self._parse_col_def(data=data, table=table, col_def=col_def, extra_info=extra_info)
+        attr_data = self._parse_col_def(
+            data=data, table=table, table_mask=table_mask, col_def=col_def, extra_info=extra_info
+        )
 
         if len(attr_data.columns) != 1:
             raise ValueError(f"DataFrame for {component}.{attr} should contain a single column ({attr_data.columns})")
@@ -268,6 +294,7 @@ class TabularConverter(BaseConverter[TabularData]):
         table: str,
         col_def: Any,
         uuids: np.ndarray,
+        table_mask: Optional[np.ndarray],
         extra_info: Optional[ExtraInfo],
     ) -> None:
         """This function can extract extra info from the tabular data and store it in the extra_info dict
@@ -292,7 +319,9 @@ class TabularConverter(BaseConverter[TabularData]):
         if extra_info is None:
             return
 
-        extra = self._parse_col_def(data=data, table=table, col_def=col_def, extra_info=None).to_dict(orient="records")
+        extra = self._parse_col_def(
+            data=data, table=table, table_mask=table_mask, col_def=col_def, extra_info=None
+        ).to_dict(orient="records")
         for i, xtr in zip(uuids, extra):
             xtr = {
                 k[0] if isinstance(k, tuple) else k: v
@@ -339,7 +368,12 @@ class TabularConverter(BaseConverter[TabularData]):
         return TabularData(logger=self._log, **data)
 
     def _parse_col_def(
-        self, data: TabularData, table: str, col_def: Any, extra_info: Optional[ExtraInfo]
+        self,
+        data: TabularData,
+        table: str,
+        col_def: Any,
+        table_mask: Optional[np.ndarray],
+        extra_info: Optional[ExtraInfo],
     ) -> pd.DataFrame:
         """Interpret the column definition and extract/convert/create the data as a pandas DataFrame.
 
@@ -353,17 +387,21 @@ class TabularConverter(BaseConverter[TabularData]):
 
         """
         if isinstance(col_def, (int, float)):
-            return self._parse_col_def_const(data=data, table=table, col_def=col_def)
+            return self._parse_col_def_const(data=data, table=table, col_def=col_def, table_mask=table_mask)
         if isinstance(col_def, str):
-            return self._parse_col_def_column_name(data=data, table=table, col_def=col_def)
+            return self._parse_col_def_column_name(data=data, table=table, col_def=col_def, table_mask=table_mask)
         if isinstance(col_def, dict):
-            return self._parse_col_def_filter(data=data, table=table, col_def=col_def, extra_info=extra_info)
+            return self._parse_col_def_filter(
+                data=data, table=table, table_mask=table_mask, col_def=col_def, extra_info=extra_info
+            )
         if isinstance(col_def, list):
-            return self._parse_col_def_composite(data=data, table=table, col_def=col_def)
+            return self._parse_col_def_composite(data=data, table=table, col_def=col_def, table_mask=table_mask)
         raise TypeError(f"Invalid column definition: {col_def}")
 
     @staticmethod
-    def _parse_col_def_const(data: TabularData, table: str, col_def: Union[int, float]) -> pd.DataFrame:
+    def _parse_col_def_const(
+        data: TabularData, table: str, col_def: Union[int, float], table_mask: Optional[np.ndarray] = None
+    ) -> pd.DataFrame:
         """Create a single column pandas DataFrame containing the const value.
 
         Args:
@@ -376,9 +414,15 @@ class TabularConverter(BaseConverter[TabularData]):
 
         """
         assert isinstance(col_def, (int, float))
-        return pd.DataFrame([col_def] * len(data[table]))
-
-    def _parse_col_def_column_name(self, data: TabularData, table: str, col_def: str) -> pd.DataFrame:
+        const_df = pd.DataFrame([col_def] * len(data[table]))
+        if table_mask is not None:
+            # Required to retain indices before filter
+            return const_df[table_mask]
+        return const_df
+
+    def _parse_col_def_column_name(
+        self, data: TabularData, table: str, col_def: str, table_mask: Optional[np.ndarray] = None
+    ) -> pd.DataFrame:
         """Extract a column from the data. If the column doesn't exist, check if the col_def is a special float value,
         like 'inf'. If that's the case, create a single column pandas DataFrame containing the const value.
 
@@ -391,13 +435,18 @@ class TabularConverter(BaseConverter[TabularData]):
 
         """
         assert isinstance(col_def, str)
+
         table_data = data[table]
+        if table_mask is not None:
+            table_data = table_data[table_mask]
 
         # If multiple columns are given in col_def, return the first column that exists in the dataset
         columns = [col_name.strip() for col_name in col_def.split("|")]
         for col_name in columns:
             if col_name in table_data or col_name == "index":
                 col_data = data.get_column(table_name=table, column_name=col_name)
+                if table_mask is not None:
+                    col_data = col_data[table_mask]
                 col_data = self._apply_multiplier(table=table, column=col_name, data=col_data)
                 return pd.DataFrame(col_data)
 
@@ -408,7 +457,7 @@ class TabularConverter(BaseConverter[TabularData]):
             columns_str = " and ".join(f"'{col_name}'" for col_name in columns)
             raise KeyError(f"Could not find column {columns_str} on table '{table}'")
 
-        return self._parse_col_def_const(data=data, table=table, col_def=const_value)
+        return self._parse_col_def_const(data=data, table=table, col_def=const_value, table_mask=table_mask)
 
     def _apply_multiplier(self, table: str, column: str, data: pd.Series) -> pd.Series:
         if self._multipliers is None:
@@ -421,7 +470,14 @@ class TabularConverter(BaseConverter[TabularData]):
             return data
 
     def _parse_reference(
-        self, data: TabularData, table: str, other_table: str, query_column: str, key_column: str, value_column: str
+        self,
+        data: TabularData,
+        table: str,
+        other_table: str,
+        query_column: str,
+        key_column: str,
+        value_column: str,
+        table_mask: Optional[np.ndarray],
     ) -> pd.DataFrame:
         """
         Find and extract a column from a different table.
@@ -437,15 +493,20 @@ class TabularConverter(BaseConverter[TabularData]):
         Returns:
 
         """
-        queries = self._parse_col_def_column_name(data=data, table=table, col_def=query_column)
-        keys = self._parse_col_def_column_name(data=data, table=other_table, col_def=key_column)
-        values = self._parse_col_def_column_name(data=data, table=other_table, col_def=value_column)
+        queries = self._parse_col_def_column_name(data=data, table=table, col_def=query_column, table_mask=table_mask)
+        keys = self._parse_col_def_column_name(data=data, table=other_table, col_def=key_column, table_mask=None)
+        values = self._parse_col_def_column_name(data=data, table=other_table, col_def=value_column, table_mask=None)
         other = pd.concat([keys, values], axis=1)
         result = queries.merge(other, how="left", left_on=query_column, right_on=key_column)
         return result[[value_column]]
 
     def _parse_col_def_filter(
-        self, data: TabularData, table: str, col_def: Dict[str, Any], extra_info: Optional[ExtraInfo]
+        self,
+        data: TabularData,
+        table: str,
+        col_def: Dict[str, Any],
+        table_mask: Optional[np.ndarray],
+        extra_info: Optional[ExtraInfo],
     ) -> pd.DataFrame:
         """
         Parse column filters like 'auto_id', 'reference', 'function', etc
@@ -464,6 +525,7 @@ class TabularConverter(BaseConverter[TabularData]):
                 col_data = self._parse_auto_id(
                     data=data,
                     table=table,
+                    table_mask=table_mask,
                     ref_table=sub_def.get("table"),
                     ref_name=sub_def.get("name"),
                     key_col_def=sub_def["key"],
@@ -481,15 +543,20 @@ class TabularConverter(BaseConverter[TabularData]):
                 return self._parse_reference(
                     data=data,
                     table=table,
+                    table_mask=table_mask,
                     other_table=sub_def["other_table"],
                     query_column=sub_def["query_column"],
                     key_column=sub_def["key_column"],
                     value_column=sub_def["value_column"],
                 )
             elif isinstance(sub_def, list):
-                col_data = self._parse_pandas_function(data=data, table=table, fn_name=name, col_def=sub_def)
+                col_data = self._parse_pandas_function(
+                    data=data, table=table, table_mask=table_mask, fn_name=name, col_def=sub_def
+                )
             elif isinstance(sub_def, dict):
-                col_data = self._parse_function(data=data, table=table, function=name, col_def=sub_def)
+                col_data = self._parse_function(
+                    data=data, table=table, table_mask=table_mask, function=name, col_def=sub_def
+                )
             else:
                 raise TypeError(f"Invalid {name} definition: {sub_def}")
             data_frames.append(col_data)
@@ -502,6 +569,7 @@ class TabularConverter(BaseConverter[TabularData]):
         ref_table: Optional[str],
         ref_name: Optional[str],
         key_col_def: Union[str, List[str], Dict[str, str]],
+        table_mask: Optional[np.ndarray],
        extra_info: Optional[ExtraInfo],
     ) -> pd.DataFrame:
         """
@@ -535,7 +603,9 @@ class TabularConverter(BaseConverter[TabularData]):
         else:
             raise TypeError(f"Invalid key definition type '{type(key_col_def).__name__}': {key_col_def}")
 
-        col_data = self._parse_col_def(data=data, table=table, col_def=key_col_def, extra_info=None)
+        col_data = self._parse_col_def(
+            data=data, table=table, table_mask=table_mask, col_def=key_col_def, extra_info=None
+        )
 
         def auto_id(row: np.ndarray):
             key = dict(zip(key_names, row))
@@ -558,7 +628,9 @@ class TabularConverter(BaseConverter[TabularData]):
 
         return col_data.apply(auto_id, axis=1, raw=True)
 
-    def _parse_pandas_function(self, data: TabularData, table: str, fn_name: str, col_def: List[Any]) -> pd.DataFrame:
+    def _parse_pandas_function(
+        self, data: TabularData, table: str, fn_name: str, col_def: List[Any], table_mask: Optional[np.ndarray]
+    ) -> pd.DataFrame:
         """Special vectorized functions.
 
         Args:
@@ -576,7 +648,7 @@ class TabularConverter(BaseConverter[TabularData]):
         if fn_name == "multiply":
             fn_name = "prod"
 
-        col_data = self._parse_col_def(data=data, table=table, col_def=col_def, extra_info=None)
+        col_data = self._parse_col_def(data=data, table=table, col_def=col_def, table_mask=table_mask, extra_info=None)
 
         try:
             fn_ptr = getattr(col_data, fn_name)
@@ -599,7 +671,9 @@ class TabularConverter(BaseConverter[TabularData]):
 
         return pd.DataFrame(fn_ptr(axis=1))
 
-    def _parse_function(self, data: TabularData, table: str, function: str, col_def: Dict[str, Any]) -> pd.DataFrame:
+    def _parse_function(
+        self, data: TabularData, table: str, function: str, col_def: Dict[str, Any], table_mask: Optional[np.ndarray]
+    ) -> pd.DataFrame:
         """Import the function by name and apply it to each row.
 
         Args:
@@ -616,7 +690,7 @@ class TabularConverter(BaseConverter[TabularData]):
         fn_ptr = get_function(function)
         key_words = list(col_def.keys())
         sub_def = list(col_def.values())
-        col_data = self._parse_col_def(data=data, table=table, col_def=sub_def, extra_info=None)
+        col_data = self._parse_col_def(data=data, table=table, col_def=sub_def, table_mask=table_mask, extra_info=None)
 
         if col_data.empty:
             raise ValueError(f"Cannot apply function {function} to an empty DataFrame")
@@ -624,7 +698,9 @@ class TabularConverter(BaseConverter[TabularData]):
         col_data = col_data.apply(lambda row, fn=fn_ptr: fn(**dict(zip(key_words, row))), axis=1, raw=True)
         return pd.DataFrame(col_data)
 
-    def _parse_col_def_composite(self, data: TabularData, table: str, col_def: list) -> pd.DataFrame:
+    def _parse_col_def_composite(
+        self, data: TabularData, table: str, col_def: list, table_mask: Optional[np.ndarray]
+    ) -> pd.DataFrame:
         """Select multiple columns (each is created from a column definition) and return them as a new DataFrame.
 
         Args:
@@ -636,7 +712,10 @@ class TabularConverter(BaseConverter[TabularData]):
 
         """
         assert isinstance(col_def, list)
-        columns = [self._parse_col_def(data=data, table=table, col_def=sub_def, extra_info=None) for sub_def in col_def]
+        columns = [
+            self._parse_col_def(data=data, table=table, col_def=sub_def, table_mask=table_mask, extra_info=None)
+            for sub_def in col_def
+        ]
         return pd.concat(columns, axis=1)
 
     def _get_id(self, table: str, key: Mapping[str, int], name: Optional[str]) -> int:
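The remaining hunks thread `table_mask` through every `_parse_col_def*` helper so that constants, plain columns, references and composite definitions all see the same filtered rows. The comment "Required to retain indices before filter" is the key detail: columns are built against the full sheet and then sliced, so the original row index is preserved and masked pieces still line up. A small illustration of why that matters (toy data, not from the package):

```python
import numpy as np
import pandas as pd

# Toy sheet and mask; in the converter the mask comes from _parse_table_filters.
table = pd.DataFrame({"Load.P": [0.0, 5.0, 7.5]})
table_mask = np.array([False, True, True])

# Build the full-length column first, then slice: the surviving rows keep index 1 and 2.
const_col = pd.DataFrame([1.0] * len(table))[table_mask]
data_col = pd.DataFrame(table["Load.P"][table_mask])

# Because both pieces share the original index, they align when concatenated.
print(pd.concat([data_col, const_col], axis=1))
```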
power_grid_model_io/functions/filters.py

@@ -0,0 +1,40 @@
+# SPDX-FileCopyrightText: Contributors to the Power Grid Model project <powergridmodel@lfenergy.org>
+#
+# SPDX-License-Identifier: MPL-2.0
+"""
+These functions can be used in the mapping files to apply filter functions to vision data
+"""
+
+from typing import List, Union
+
+import pandas as pd
+
+from power_grid_model_io.functions import has_value
+
+
+def exclude_empty(row: pd.Series, col: str) -> bool:
+    """
+    filter out empty
+    """
+    result = has_value(row[col])
+    if isinstance(result, pd.Series):
+        return result.item()
+    return result
+
+
+def exclude_value(row: pd.Series, col: str, value: Union[float, str]) -> bool:
+    """
+    filter out by match value
+    """
+    result = row[col] != value
+    if isinstance(result, pd.Series):
+        return result.item()
+    return result
+
+
+def exclude_all_columns_empty_or_zero(row: pd.Series, cols: List[str]) -> bool:
+    """
+    filter out empty or zero values in multiple columns.
+    This is same as not all(not exclude_value or not exclude_empty)
+    """
+    return any(exclude_value(row, col, 0) and exclude_empty(row, col) for col in cols)
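The three helpers are plain row predicates: `exclude_empty` keeps rows whose column holds a value, `exclude_value` keeps rows whose column differs from a given value, and `exclude_all_columns_empty_or_zero` keeps a row if any of the listed columns is both non-empty and non-zero. A quick, illustrative check of how they compose (the values are made up):

```python
import pandas as pd

from power_grid_model_io.functions.filters import (
    exclude_all_columns_empty_or_zero,
    exclude_empty,
    exclude_value,
)

row = pd.Series({"Load.P": 0.0, "Load.Q": 2.5})

print(exclude_empty(row, "Load.P"))                                  # True: 0.0 is a value
print(exclude_value(row, "Load.P", 0))                               # False: it equals 0
print(exclude_all_columns_empty_or_zero(row, ["Load.P", "Load.Q"]))  # True: Load.Q is non-empty and non-zero
```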
{power_grid_model_io-1.2.85.dist-info → power_grid_model_io-1.2.87.dist-info}/RECORD

@@ -3,14 +3,14 @@ power_grid_model_io/config/__init__.py,sha256=qwbj1j-Aa_yRB-E3j35pEVtF3mgH8CVIXA
 power_grid_model_io/config/examples/__init__.py,sha256=qwbj1j-Aa_yRB-E3j35pEVtF3mgH8CVIXAnog5mOry0,138
 power_grid_model_io/config/examples/multipliers.yaml,sha256=UjsxWmxqLrTLGE0GI--9fKDqTkn3jY8n5KSYlwzh2o4,227
 power_grid_model_io/config/excel/__init__.py,sha256=qwbj1j-Aa_yRB-E3j35pEVtF3mgH8CVIXAnog5mOry0,138
-power_grid_model_io/config/excel/vision_en.yaml,sha256=
-power_grid_model_io/config/excel/vision_en_9_7.yaml,sha256=
-power_grid_model_io/config/excel/vision_nl.yaml,sha256=
+power_grid_model_io/config/excel/vision_en.yaml,sha256=1EHPFqle3mpiPL8NZ-tQvi8YYww-8lylxwANpB2CPJk,19507
+power_grid_model_io/config/excel/vision_en_9_7.yaml,sha256=mmLteTc-IcGIetrxz7W10tQF3YmNM2IojRYdsKN73co,19520
+power_grid_model_io/config/excel/vision_nl.yaml,sha256=BZvpt78tst7S7UG3C-AcGA4XrZjmtDgcVy8RTCBMP6M,19721
 power_grid_model_io/converters/__init__.py,sha256=kmbjFW6kVr30fmHb6mAoD7DQAqmbrsOuF1ewd8b0Q3M,408
 power_grid_model_io/converters/base_converter.py,sha256=TICEXyG7PtimAUvhAmpSoZUqwa0shiDdwbWpiM2Ur7o,6048
 power_grid_model_io/converters/pandapower_converter.py,sha256=6gY-mvbVLlKKBxGOM1024pekCtmMeGUxQunN19nYHio,113935
 power_grid_model_io/converters/pgm_json_converter.py,sha256=uqsDGhf4hilzUQdQXZEfOYXWJnUpMQqTNHXnWQUY-So,13158
-power_grid_model_io/converters/tabular_converter.py,sha256=
+power_grid_model_io/converters/tabular_converter.py,sha256=eaBIqI2HOb5uwR1eVYDji5exNRd7aRFL0r38vEZIEzg,33398
 power_grid_model_io/converters/vision_excel_converter.py,sha256=PSvbA5jKO-w7R7KIEstvx2pBjTgULoFU7SBG8MkAmSo,4129
 power_grid_model_io/data_stores/__init__.py,sha256=qwbj1j-Aa_yRB-E3j35pEVtF3mgH8CVIXAnog5mOry0,138
 power_grid_model_io/data_stores/base_data_store.py,sha256=DJfLtRwvx_tXKnpjtBdfbMqPjWc324Eo5WeKTXjWXqc,1706
@@ -23,6 +23,7 @@ power_grid_model_io/data_types/_data_types.py,sha256=9xH5vBGrRVUSlPh4HXmORtKo3LF
 power_grid_model_io/data_types/tabular_data.py,sha256=sV6S4kqCEuQiNZTOdKS7CiA2M8Ny1oGXvtFoN-xkYBg,8582
 power_grid_model_io/functions/__init__.py,sha256=pamhvKX5c_5fkVMRrUp6zhHWex2R63otRJk1Sfsw6y0,495
 power_grid_model_io/functions/_functions.py,sha256=tqwwZ0G8AeDza0IiS6CSMwKB0lV1hDo2D8e9-ARHXQM,2843
+power_grid_model_io/functions/filters.py,sha256=ZkUD6VaTrWLxsVqOvsUNXMzPtpTLKrsH9NdX2F_JxVU,1105
 power_grid_model_io/functions/phase_to_phase.py,sha256=zbaDXIj8S4cLO42LjkpcQoUrEW1frzBUj1OmKu-xkTg,4459
 power_grid_model_io/mappings/__init__.py,sha256=qwbj1j-Aa_yRB-E3j35pEVtF3mgH8CVIXAnog5mOry0,138
 power_grid_model_io/mappings/field_mapping.py,sha256=YfrwKolNG06kIC1sbUYnYmxuOrbNbNo1dYtnF8rNItw,1659
@@ -39,8 +40,8 @@ power_grid_model_io/utils/modules.py,sha256=a4IdozSL-sOZcmIQON_aQS7-cpnCyt-3p7zs
 power_grid_model_io/utils/parsing.py,sha256=XB1QSHnslIieFJBKFXZCtiydqpOqQBiX_CXDbItXgAQ,4522
 power_grid_model_io/utils/uuid_excel_cvtr.py,sha256=H1iWhW_nluJBUJ3hK-Gc0xJjGnH5e35WrBz_fA3YXZs,7626
 power_grid_model_io/utils/zip.py,sha256=VXHX4xWPPZbhOlZUAbMDy3MgQFzK6_l7sRvGXihNUY4,3875
-power_grid_model_io-1.2.
-power_grid_model_io-1.2.
-power_grid_model_io-1.2.
-power_grid_model_io-1.2.
-power_grid_model_io-1.2.
+power_grid_model_io-1.2.87.dist-info/LICENSE,sha256=7Pm2fWFFHHUG5lDHed1vl5CjzxObIXQglnYsEdtjo_k,14907
+power_grid_model_io-1.2.87.dist-info/METADATA,sha256=RHNOY4JKrEpEtX6hiq-uEjlkJQE27tePkidA3Oqo2wI,8041
+power_grid_model_io-1.2.87.dist-info/WHEEL,sha256=mguMlWGMX-VHnMpKOjjQidIo1ssRlCFu4a4mBpz1s2M,91
+power_grid_model_io-1.2.87.dist-info/top_level.txt,sha256=7sq9VveemMm2R0RgTBa4tH8y_xF4_1hxbufmX9OjCTo,20
+power_grid_model_io-1.2.87.dist-info/RECORD,,
{power_grid_model_io-1.2.85.dist-info → power_grid_model_io-1.2.87.dist-info}/LICENSE: file without changes
{power_grid_model_io-1.2.85.dist-info → power_grid_model_io-1.2.87.dist-info}/top_level.txt: file without changes