power-grid-model 1.12.58-py3-none-win_amd64.whl → 1.12.59-py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of power-grid-model might be problematic.

Files changed (59)
  1. power_grid_model/__init__.py +54 -54
  2. power_grid_model/_core/__init__.py +3 -3
  3. power_grid_model/_core/buffer_handling.py +493 -493
  4. power_grid_model/_core/data_handling.py +141 -141
  5. power_grid_model/_core/data_types.py +132 -132
  6. power_grid_model/_core/dataset_definitions.py +109 -109
  7. power_grid_model/_core/enum.py +226 -226
  8. power_grid_model/_core/error_handling.py +206 -206
  9. power_grid_model/_core/errors.py +130 -130
  10. power_grid_model/_core/index_integer.py +17 -17
  11. power_grid_model/_core/options.py +71 -71
  12. power_grid_model/_core/power_grid_core.py +563 -563
  13. power_grid_model/_core/power_grid_dataset.py +535 -535
  14. power_grid_model/_core/power_grid_meta.py +243 -243
  15. power_grid_model/_core/power_grid_model.py +686 -686
  16. power_grid_model/_core/power_grid_model_c/__init__.py +3 -3
  17. power_grid_model/_core/power_grid_model_c/bin/power_grid_model_c.dll +0 -0
  18. power_grid_model/_core/power_grid_model_c/get_pgm_dll_path.py +63 -63
  19. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/basics.h +255 -255
  20. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/buffer.h +108 -108
  21. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/dataset.h +316 -316
  22. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/dataset_definitions.h +1052 -1052
  23. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/handle.h +99 -99
  24. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/meta_data.h +189 -189
  25. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/model.h +125 -125
  26. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/options.h +142 -142
  27. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/serialization.h +118 -118
  28. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c.h +36 -36
  29. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/basics.hpp +65 -65
  30. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/buffer.hpp +61 -61
  31. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/dataset.hpp +220 -220
  32. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/handle.hpp +108 -108
  33. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/meta_data.hpp +84 -84
  34. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/model.hpp +63 -63
  35. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/options.hpp +52 -52
  36. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/serialization.hpp +124 -124
  37. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/utils.hpp +81 -81
  38. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp.hpp +19 -19
  39. power_grid_model/_core/power_grid_model_c/lib/cmake/power_grid_model/power_grid_modelConfigVersion.cmake +3 -3
  40. power_grid_model/_core/serialization.py +317 -317
  41. power_grid_model/_core/typing.py +20 -20
  42. power_grid_model/_core/utils.py +798 -798
  43. power_grid_model/data_types.py +321 -321
  44. power_grid_model/enum.py +27 -27
  45. power_grid_model/errors.py +37 -37
  46. power_grid_model/typing.py +43 -43
  47. power_grid_model/utils.py +473 -473
  48. power_grid_model/validation/__init__.py +25 -25
  49. power_grid_model/validation/_rules.py +1171 -1171
  50. power_grid_model/validation/_validation.py +1172 -1172
  51. power_grid_model/validation/assertions.py +93 -93
  52. power_grid_model/validation/errors.py +602 -602
  53. power_grid_model/validation/utils.py +313 -313
  54. {power_grid_model-1.12.58.dist-info → power_grid_model-1.12.59.dist-info}/METADATA +1 -1
  55. power_grid_model-1.12.59.dist-info/RECORD +65 -0
  56. power_grid_model-1.12.58.dist-info/RECORD +0 -65
  57. {power_grid_model-1.12.58.dist-info → power_grid_model-1.12.59.dist-info}/WHEEL +0 -0
  58. {power_grid_model-1.12.58.dist-info → power_grid_model-1.12.59.dist-info}/entry_points.txt +0 -0
  59. {power_grid_model-1.12.58.dist-info → power_grid_model-1.12.59.dist-info}/licenses/LICENSE +0 -0
@@ -1,686 +1,686 @@
1
- # SPDX-FileCopyrightText: Contributors to the Power Grid Model project <powergridmodel@lfenergy.org>
2
- #
3
- # SPDX-License-Identifier: MPL-2.0
4
-
5
- """
6
- Main power grid model class
7
- """
8
-
9
- from enum import IntEnum
10
-
11
- import numpy as np
12
-
13
- from power_grid_model._core.data_handling import (
14
- create_output_data,
15
- get_output_type,
16
- prepare_input_view,
17
- prepare_output_view,
18
- prepare_update_view,
19
- )
20
- from power_grid_model._core.data_types import Dataset, SingleDataset
21
- from power_grid_model._core.dataset_definitions import (
22
- ComponentType,
23
- ComponentTypeLike,
24
- _map_to_component_types,
25
- _str_to_component_type,
26
- )
27
- from power_grid_model._core.enum import (
28
- CalculationMethod,
29
- CalculationType,
30
- ShortCircuitVoltageScaling,
31
- TapChangingStrategy,
32
- _ExperimentalFeatures,
33
- )
34
- from power_grid_model._core.error_handling import PowerGridBatchError, assert_no_error, handle_errors
35
- from power_grid_model._core.index_integer import IdNp, IdxNp
36
- from power_grid_model._core.options import Options
37
- from power_grid_model._core.power_grid_core import ConstDatasetPtr, IDPtr, IdxPtr, ModelPtr, power_grid_core as pgc
38
- from power_grid_model._core.typing import ComponentAttributeMapping
39
-
40
-
41
- class PowerGridModel:
42
- """
43
- Main class for Power Grid Model
44
- """
45
-
46
- _model_ptr: ModelPtr
47
- _all_component_count: dict[ComponentType, int] | None
48
- _batch_error: PowerGridBatchError | None
49
-
50
- @property
51
- def batch_error(self) -> PowerGridBatchError | None:
52
- """
53
- Get the batch error object, if present, after a batch calculation with errors.
54
-
55
- Also works when continue_on_batch_error was set to True during the calculation.
56
-
57
- Returns:
58
- Batch error object, or None
59
- """
60
- return self._batch_error
61
-
62
- @property
63
- def _model(self):
64
- if not self._model_ptr:
65
- raise TypeError("You have an empty instance of PowerGridModel!")
66
- return self._model_ptr
67
-
68
- @property
69
- def all_component_count(self) -> dict[ComponentType, int]:
70
- """
71
- Get amount of elements per component type.
72
- If the count for a component type is zero, it will not be in the returned dictionary.
73
-
74
- Returns:
75
- A dictionary with
76
-
77
- - key: Component type name
78
- - value: Integer count of elements of this type
79
- """
80
- if self._all_component_count is None:
81
- raise TypeError("You have an empty instance of PowerGridModel!")
82
- return self._all_component_count
83
-
84
- def copy(self) -> "PowerGridModel":
85
- """
86
- Copy the current model
87
-
88
- Returns:
89
- A copy of PowerGridModel
90
- """
91
- new_model = PowerGridModel.__new__(PowerGridModel)
92
- new_model._model_ptr = pgc.copy_model(self._model)
93
- assert_no_error()
94
- new_model._all_component_count = self._all_component_count
95
- return new_model
96
-
97
- def __copy__(self):
98
- return self.copy()
99
-
100
- def __new__(cls, *_args, **_kwargs):
101
- instance = super().__new__(cls)
102
- instance._model_ptr = ModelPtr()
103
- instance._all_component_count = None
104
- return instance
105
-
106
- def __init__(self, input_data: SingleDataset, system_frequency: float = 50.0):
107
- """
108
- Initialize the model from an input data set.
109
-
110
- Args:
111
- input_data: Input data dictionary
112
-
113
- - key: Component type
114
- - value: Component data with the correct type :class:`SingleComponentData`
115
-
116
- system_frequency: Frequency of the power system, default 50 Hz
117
- """
118
- # destroy old instance
119
- pgc.destroy_model(self._model_ptr)
120
- self._all_component_count = None
121
- # create new
122
- prepared_input = prepare_input_view(_map_to_component_types(input_data))
123
- self._model_ptr = pgc.create_model(system_frequency, input_data=prepared_input.get_dataset_ptr())
124
- assert_no_error()
125
- self._all_component_count = {k: v for k, v in prepared_input.get_info().total_elements().items() if v > 0}
126
-
127
- def update(self, *, update_data: Dataset):
128
- """
129
- Update the model with changes.
130
-
131
- The model will be in an invalid state if the update fails and should be discarded.
132
-
133
- Args:
134
- update_data: Update data dictionary
135
-
136
- - key: Component type
137
- - value: Component data with the correct type :class:`ComponentData` (single scenario or batch)
138
-
139
- Raises:
140
- PowerGridError if the update fails. The model is left in an invalid state and should be discarded.
141
-
142
- Returns:
143
- None
144
- """
145
- prepared_update = prepare_update_view(_map_to_component_types(update_data))
146
- pgc.update_model(self._model, prepared_update.get_dataset_ptr())
147
- assert_no_error()
148
-
149
- def get_indexer(self, component_type: ComponentTypeLike, ids: np.ndarray):
150
- """
151
- Get array of indexers given array of ids for component type.
152
-
153
- This enables syntax like input_data[ComponentType.node][get_indexer(ids)]
154
-
155
- Args:
156
- component_type: Type of component
157
- ids: Array of ids
158
-
159
- Returns:
160
- Array of indexers, same shape as input array ids
161
- """
162
- component_type = _str_to_component_type(component_type)
163
- ids_c = np.ascontiguousarray(ids, dtype=IdNp).ctypes.data_as(IDPtr)
164
- indexer = np.empty_like(ids, dtype=IdxNp, order="C")
165
- indexer_c = indexer.ctypes.data_as(IdxPtr)
166
- size = ids.size
167
- # call c function
168
- pgc.get_indexer(self._model, component_type, size, ids_c, indexer_c)
169
- assert_no_error()
170
- return indexer
171
-
172
- def _get_output_component_count(self, calculation_type: CalculationType):
173
- exclude_types = {
174
- CalculationType.power_flow: [
175
- ComponentType.sym_voltage_sensor,
176
- ComponentType.asym_voltage_sensor,
177
- ComponentType.sym_power_sensor,
178
- ComponentType.asym_power_sensor,
179
- ComponentType.fault,
180
- ],
181
- CalculationType.state_estimation: [ComponentType.fault],
182
- CalculationType.short_circuit: [
183
- ComponentType.sym_voltage_sensor,
184
- ComponentType.asym_voltage_sensor,
185
- ComponentType.sym_power_sensor,
186
- ComponentType.asym_power_sensor,
187
- ],
188
- }.get(calculation_type, [])
189
-
190
- def include_type(component_type: ComponentType):
191
- return all(exclude_type.value not in component_type.value for exclude_type in exclude_types)
192
-
193
- return {ComponentType[k]: v for k, v in self.all_component_count.items() if include_type(k)}
194
-
195
- def _construct_output(
196
- self,
197
- output_component_types: ComponentAttributeMapping,
198
- calculation_type: CalculationType,
199
- symmetric: bool,
200
- is_batch: bool,
201
- batch_size: int,
202
- ) -> dict[ComponentType, np.ndarray]:
203
- all_component_count = self._get_output_component_count(calculation_type=calculation_type)
204
- return create_output_data(
205
- output_component_types=output_component_types,
206
- output_type=get_output_type(calculation_type=calculation_type, symmetric=symmetric),
207
- all_component_count=all_component_count,
208
- is_batch=is_batch,
209
- batch_size=batch_size,
210
- )
211
-
212
- @staticmethod
213
- def _options(**kwargs) -> Options:
214
- def as_enum_value(key_enum: str, type_: type[IntEnum]):
215
- if key_enum in kwargs:
216
- value_enum = kwargs[key_enum]
217
- if isinstance(value_enum, str):
218
- kwargs[key_enum] = type_[value_enum]
219
-
220
- as_enum_value("calculation_method", CalculationMethod)
221
- as_enum_value("tap_changing_strategy", TapChangingStrategy)
222
- as_enum_value("short_circuit_voltage_scaling", ShortCircuitVoltageScaling)
223
- as_enum_value("experimental_features", _ExperimentalFeatures)
224
-
225
- opt = Options()
226
- for key, value in kwargs.items():
227
- setattr(opt, key, value.value if isinstance(value, IntEnum) else value)
228
- return opt
229
-
230
- def _handle_errors(self, continue_on_batch_error: bool, batch_size: int, decode_error: bool):
231
- self._batch_error = handle_errors(
232
- continue_on_batch_error=continue_on_batch_error,
233
- batch_size=batch_size,
234
- decode_error=decode_error,
235
- )
236
-
237
- def _calculate_impl( # noqa: PLR0913
238
- self,
239
- calculation_type: CalculationType,
240
- symmetric: bool,
241
- update_data: Dataset | None,
242
- output_component_types: ComponentAttributeMapping,
243
- options: Options,
244
- continue_on_batch_error: bool,
245
- decode_error: bool,
246
- experimental_features: _ExperimentalFeatures | str, # NOSONAR # noqa: ARG002
247
- ):
248
- """
249
- Core calculation routine
250
-
251
- Args:
252
- calculation_type:
253
- symmetric:
254
- update_data:
255
- output_component_types:
256
- options:
257
- continue_on_batch_error:
258
- decode_error:
259
-
260
- Returns:
261
- """
262
- self._batch_error = None
263
- is_batch = update_data is not None
264
-
265
- if update_data is not None:
266
- prepared_update = prepare_update_view(update_data)
267
- update_ptr = prepared_update.get_dataset_ptr()
268
- batch_size = prepared_update.get_info().batch_size()
269
- else:
270
- update_ptr = ConstDatasetPtr()
271
- batch_size = 1
272
-
273
- output_data = self._construct_output(
274
- output_component_types=output_component_types,
275
- calculation_type=calculation_type,
276
- symmetric=symmetric,
277
- is_batch=is_batch,
278
- batch_size=batch_size,
279
- )
280
- prepared_result = prepare_output_view(
281
- output_data=output_data,
282
- output_type=get_output_type(calculation_type=calculation_type, symmetric=symmetric),
283
- )
284
-
285
- # run calculation
286
- pgc.calculate(
287
- # model and options
288
- self._model,
289
- options.opt,
290
- output_data=prepared_result.get_dataset_ptr(),
291
- update_data=update_ptr,
292
- )
293
-
294
- self._handle_errors(
295
- continue_on_batch_error=continue_on_batch_error,
296
- batch_size=batch_size,
297
- decode_error=decode_error,
298
- )
299
-
300
- return output_data
301
-
302
- def _calculate_power_flow( # noqa: PLR0913
303
- self,
304
- *,
305
- symmetric: bool = True,
306
- error_tolerance: float = 1e-8,
307
- max_iterations: int = 20,
308
- calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson,
309
- update_data: Dataset | None = None,
310
- threading: int = -1,
311
- output_component_types: ComponentAttributeMapping = None,
312
- continue_on_batch_error: bool = False,
313
- decode_error: bool = True,
314
- tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled,
315
- experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
316
- ):
317
- calculation_type = CalculationType.power_flow
318
- options = self._options(
319
- calculation_type=calculation_type,
320
- symmetric=symmetric,
321
- error_tolerance=error_tolerance,
322
- max_iterations=max_iterations,
323
- calculation_method=calculation_method,
324
- tap_changing_strategy=tap_changing_strategy,
325
- threading=threading,
326
- experimental_features=experimental_features,
327
- )
328
- return self._calculate_impl(
329
- calculation_type=calculation_type,
330
- symmetric=symmetric,
331
- update_data=update_data,
332
- output_component_types=output_component_types,
333
- options=options,
334
- continue_on_batch_error=continue_on_batch_error,
335
- decode_error=decode_error,
336
- experimental_features=experimental_features,
337
- )
338
-
339
- def _calculate_state_estimation( # noqa: PLR0913
340
- self,
341
- *,
342
- symmetric: bool = True,
343
- error_tolerance: float = 1e-8,
344
- max_iterations: int = 20,
345
- calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear,
346
- update_data: Dataset | None = None,
347
- threading: int = -1,
348
- output_component_types: ComponentAttributeMapping = None,
349
- continue_on_batch_error: bool = False,
350
- decode_error: bool = True,
351
- experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
352
- ) -> dict[ComponentType, np.ndarray]:
353
- calculation_type = CalculationType.state_estimation
354
- options = self._options(
355
- calculation_type=calculation_type,
356
- symmetric=symmetric,
357
- error_tolerance=error_tolerance,
358
- max_iterations=max_iterations,
359
- calculation_method=calculation_method,
360
- threading=threading,
361
- experimental_features=experimental_features,
362
- )
363
- return self._calculate_impl(
364
- calculation_type=calculation_type,
365
- symmetric=symmetric,
366
- update_data=update_data,
367
- output_component_types=output_component_types,
368
- options=options,
369
- continue_on_batch_error=continue_on_batch_error,
370
- decode_error=decode_error,
371
- experimental_features=experimental_features,
372
- )
373
-
374
- def _calculate_short_circuit( # noqa: PLR0913
375
- self,
376
- *,
377
- calculation_method: CalculationMethod | str = CalculationMethod.iec60909,
378
- update_data: Dataset | None = None,
379
- threading: int = -1,
380
- output_component_types: ComponentAttributeMapping = None,
381
- continue_on_batch_error: bool = False,
382
- decode_error: bool = True,
383
- short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum,
384
- experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
385
- ) -> dict[ComponentType, np.ndarray]:
386
- calculation_type = CalculationType.short_circuit
387
- symmetric = False
388
-
389
- options = self._options(
390
- calculation_type=calculation_type,
391
- symmetric=symmetric,
392
- calculation_method=calculation_method,
393
- threading=threading,
394
- short_circuit_voltage_scaling=short_circuit_voltage_scaling,
395
- experimental_features=experimental_features,
396
- )
397
- return self._calculate_impl(
398
- calculation_type=calculation_type,
399
- symmetric=symmetric,
400
- update_data=update_data,
401
- output_component_types=output_component_types,
402
- options=options,
403
- continue_on_batch_error=continue_on_batch_error,
404
- decode_error=decode_error,
405
- experimental_features=experimental_features,
406
- )
407
-
408
- def calculate_power_flow( # noqa: PLR0913
409
- self,
410
- *,
411
- symmetric: bool = True,
412
- error_tolerance: float = 1e-8,
413
- max_iterations: int = 20,
414
- calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson,
415
- update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
416
- threading: int = -1,
417
- output_component_types: ComponentAttributeMapping = None,
418
- continue_on_batch_error: bool = False,
419
- decode_error: bool = True,
420
- tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled,
421
- ) -> dict[ComponentType, np.ndarray]:
422
- """
423
- Calculate power flow once with the current model attributes.
424
- Or calculate in batch with the given update dataset in batch.
425
-
426
- Args:
427
- symmetric (bool, optional): Whether to perform a three-phase symmetric calculation.
428
-
429
- - True: Three-phase symmetric calculation, even for asymmetric loads/generations (Default).
430
- - False: Three-phase asymmetric calculation.
431
- error_tolerance (float, optional): Error tolerance for voltage in p.u., applicable only when the
432
- calculation method is iterative.
433
- max_iterations (int, optional): Maximum number of iterations, applicable only when the calculation method
434
- is iterative.
435
- calculation_method (an enumeration or string): The calculation method to use.
436
-
437
- - newton_raphson: Use Newton-Raphson iterative method (default).
438
- - linear: Use linear method.
439
- update_data (dict, optional):
440
- None: Calculate power flow once with the current model attributes.
441
- Or a dictionary for batch calculation with batch update.
442
-
443
- - key: Component type name to be updated in batch.
444
- - value:
445
-
446
- - For homogeneous update batch (a 2D numpy structured array):
447
-
448
- - Dimension 0: Each batch.
449
- - Dimension 1: Each updated element per batch for this component type.
450
- - For inhomogeneous update batch (a dictionary containing two keys):
451
-
452
- - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
453
- update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
454
- compressed sparse structure.
455
- https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
456
- - data: 1D numpy structured array in flat.
457
- threading (int, optional): Applicable only for batch calculation.
458
-
459
- - < 0: Sequential
460
- - = 0: Parallel, use number of hardware threads
461
- - > 0: Specify number of parallel threads
462
- output_component_types (ComponentAttributeMapping):
463
-
464
- - None: Row based data for all component types.
465
- - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
466
- - ComponentAttributeFilterOptions: Columnar data for all component types.
467
- - dict[ComponentType, set[str] | list[str] | None | ComponentAttributeFilterOptions]:
468
- key: ComponentType
469
- value:
470
- - None: Row based data for the specified component types.
471
- - ComponentAttributeFilterOptions: Columnar data for the specified component types.
472
- - set[str] | list[str]: Columnar data for the specified component types and attributes.
473
- continue_on_batch_error (bool, optional):
474
- Continue the program (instead of throwing error) if some scenarios fail.
475
- You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
476
- decode_error (bool, optional):
477
- Decode error messages to their derived types if possible.
478
-
479
- Returns:
480
- Dictionary of results of all components.
481
-
482
- - key: Component type name to be updated in batch.
483
- - value:
484
-
485
- - For single calculation: 1D numpy structured array for the results of this component type.
486
- - For batch calculation: 2D numpy structured array for the results of this component type.
487
-
488
- - Dimension 0: Each batch.
489
- - Dimension 1: The result of each element for this component type.
490
-
491
- Raises:
492
- Exception: In case an error in the core occurs, an exception will be thrown.
493
- """
494
- return self._calculate_power_flow(
495
- symmetric=symmetric,
496
- error_tolerance=error_tolerance,
497
- max_iterations=max_iterations,
498
- calculation_method=calculation_method,
499
- update_data=(_map_to_component_types(update_data) if update_data is not None else None),
500
- threading=threading,
501
- output_component_types=output_component_types,
502
- continue_on_batch_error=continue_on_batch_error,
503
- decode_error=decode_error,
504
- tap_changing_strategy=tap_changing_strategy,
505
- )
506
-
507
- def calculate_state_estimation( # noqa: PLR0913
508
- self,
509
- *,
510
- symmetric: bool = True,
511
- error_tolerance: float = 1e-8,
512
- max_iterations: int = 20,
513
- calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear,
514
- update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
515
- threading: int = -1,
516
- output_component_types: ComponentAttributeMapping = None,
517
- continue_on_batch_error: bool = False,
518
- decode_error: bool = True,
519
- ) -> dict[ComponentType, np.ndarray]:
520
- """
521
- Calculate state estimation once with the current model attributes.
522
- Or calculate in batch with the given update dataset in batch.
523
-
524
- Args:
525
- symmetric (bool, optional): Whether to perform a three-phase symmetric calculation.
526
-
527
- - True: Three-phase symmetric calculation, even for asymmetric loads/generations (Default).
528
- - False: Three-phase asymmetric calculation.
529
- error_tolerance (float, optional): error tolerance for voltage in p.u., only applicable when the
530
- calculation method is iterative.
531
- max_iterations (int, optional): Maximum number of iterations, applicable only when the calculation method
532
- is iterative.
533
- calculation_method (an enumeration): Use iterative linear method.
534
- update_data (dict, optional):
535
- None: Calculate state estimation once with the current model attributes.
536
- Or a dictionary for batch calculation with batch update.
537
-
538
- - key: Component type name to be updated in batch.
539
- - value:
540
-
541
- - For homogeneous update batch (a 2D numpy structured array):
542
-
543
- - Dimension 0: Each batch.
544
- - Dimension 1: Each updated element per batch for this component type.
545
- - For inhomogeneous update batch (a dictionary containing two keys):
546
-
547
- - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
548
- update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
549
- compressed sparse structure.
550
- https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
551
- - data: 1D numpy structured array in flat.
552
- threading (int, optional): Applicable only for batch calculation.
553
-
554
- - < 0: Sequential
555
- - = 0: Parallel, use number of hardware threads
556
- - > 0: Specify number of parallel threads
557
- output_component_types (ComponentAttributeMapping):
558
-
559
- - None: Row based data for all component types.
560
- - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
561
- - ComponentAttributeFilterOptions: Columnar data for all component types.
562
- - dict[ComponentType, set[str] | list[str] | None | ComponentAttributeFilterOptions]:
563
- key: ComponentType
564
- value:
565
- - None: Row based data for the specified component types.
566
- - ComponentAttributeFilterOptions: Columnar data for the specified component types.
567
- - set[str] | list[str]: Columnar data for the specified component types and attributes.
568
- continue_on_batch_error (bool, optional):
569
- Continue the program (instead of throwing error) if some scenarios fail.
570
- You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
571
- decode_error (bool, optional):
572
- Decode error messages to their derived types if possible.
573
-
574
- Returns:
575
- Dictionary of results of all components.
576
-
577
- - key: Component type name to be updated in batch.
578
- - value:
579
-
580
- - For single calculation: 1D numpy structured array for the results of this component type.
581
- - For batch calculation: 2D numpy structured array for the results of this component type.
582
-
583
- - Dimension 0: Each batch.
584
- - Dimension 1: The result of each element for this component type.
585
-
586
- Raises:
587
- Exception: In case an error in the core occurs, an exception will be thrown.
588
- """
589
- return self._calculate_state_estimation(
590
- symmetric=symmetric,
591
- error_tolerance=error_tolerance,
592
- max_iterations=max_iterations,
593
- calculation_method=calculation_method,
594
- update_data=(_map_to_component_types(update_data) if update_data is not None else None),
595
- threading=threading,
596
- output_component_types=output_component_types,
597
- continue_on_batch_error=continue_on_batch_error,
598
- decode_error=decode_error,
599
- )
600
-
601
- def calculate_short_circuit( # noqa: PLR0913
602
- self,
603
- *,
604
- calculation_method: CalculationMethod | str = CalculationMethod.iec60909,
605
- update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
606
- threading: int = -1,
607
- output_component_types: ComponentAttributeMapping = None,
608
- continue_on_batch_error: bool = False,
609
- decode_error: bool = True,
610
- short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum,
611
- ) -> dict[ComponentType, np.ndarray]:
612
- """
613
- Calculate a short circuit once with the current model attributes.
614
- Or calculate in batch with the given update dataset in batch
615
-
616
- Args:
617
- calculation_method (an enumeration): Use the iec60909 standard.
618
- update_data:
619
- None: calculate a short circuit once with the current model attributes.
620
- Or a dictionary for batch calculation with batch update
621
-
622
- - key: Component type name to be updated in batch
623
- - value:
624
-
625
- - For homogeneous update batch (a 2D numpy structured array):
626
-
627
- - Dimension 0: each batch
628
- - Dimension 1: each updated element per batch for this component type
629
- - For inhomogeneous update batch (a dictionary containing two keys):
630
-
631
- - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
632
- update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
633
- compressed sparse structure.
634
- https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
635
- - data: 1D numpy structured array in flat.
636
- threading (int, optional): Applicable only for batch calculation.
637
-
638
- - < 0: Sequential
639
- - = 0: Parallel, use number of hardware threads
640
- - > 0: Specify number of parallel threads
641
- output_component_types (ComponentAttributeMapping):
642
-
643
- - None: Row based data for all component types.
644
- - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
645
- - ComponentAttributeFilterOptions: Columnar data for all component types.
646
- - dict[ComponentType, set[str] | list[str] | None | ComponentAttributeFilterOptions]:
647
- key: ComponentType
648
- value:
649
- - None: Row based data for the specified component types.
650
- - ComponentAttributeFilterOptions: Columnar data for the specified component types.
651
- - set[str] | list[str]: Columnar data for the specified component types and attributes.
652
- continue_on_batch_error (bool, optional):
653
- Continue the program (instead of throwing error) if some scenarios fail.
654
- You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
655
- decode_error (bool, optional):
656
- Decode error messages to their derived types if possible.
657
- short_circuit_voltage_scaling ({ShortCircuitVoltageSaling, str}, optional):
658
- Whether to use the maximum or minimum voltage scaling.
659
- By default, the maximum voltage scaling is used to calculate the short circuit.
660
-
661
- Returns:
662
- Dictionary of results of all components.
663
-
664
- - key: Component type name to be updated in batch.
665
- - value:
666
-
667
- - For single calculation: 1D numpy structured array for the results of this component type.
668
- - For batch calculation: 2D numpy structured array for the results of this component type.
669
-
670
- - Dimension 0: Each batch.
671
- - Dimension 1: The result of each element for this component type.
672
- Raises:
673
- Exception: In case an error in the core occurs, an exception will be thrown.
674
- """
675
- return self._calculate_short_circuit(
676
- calculation_method=calculation_method,
677
- update_data=(_map_to_component_types(update_data) if update_data is not None else None),
678
- threading=threading,
679
- output_component_types=output_component_types,
680
- continue_on_batch_error=continue_on_batch_error,
681
- decode_error=decode_error,
682
- short_circuit_voltage_scaling=short_circuit_voltage_scaling,
683
- )
684
-
685
- def __del__(self):
686
- pgc.destroy_model(self._model_ptr)
1
+ # SPDX-FileCopyrightText: Contributors to the Power Grid Model project <powergridmodel@lfenergy.org>
2
+ #
3
+ # SPDX-License-Identifier: MPL-2.0
4
+
5
+ """
6
+ Main power grid model class
7
+ """
8
+
9
+ from enum import IntEnum
10
+
11
+ import numpy as np
12
+
13
+ from power_grid_model._core.data_handling import (
14
+ create_output_data,
15
+ get_output_type,
16
+ prepare_input_view,
17
+ prepare_output_view,
18
+ prepare_update_view,
19
+ )
20
+ from power_grid_model._core.data_types import Dataset, SingleDataset
21
+ from power_grid_model._core.dataset_definitions import (
22
+ ComponentType,
23
+ ComponentTypeLike,
24
+ _map_to_component_types,
25
+ _str_to_component_type,
26
+ )
27
+ from power_grid_model._core.enum import (
28
+ CalculationMethod,
29
+ CalculationType,
30
+ ShortCircuitVoltageScaling,
31
+ TapChangingStrategy,
32
+ _ExperimentalFeatures,
33
+ )
34
+ from power_grid_model._core.error_handling import PowerGridBatchError, assert_no_error, handle_errors
35
+ from power_grid_model._core.index_integer import IdNp, IdxNp
36
+ from power_grid_model._core.options import Options
37
+ from power_grid_model._core.power_grid_core import ConstDatasetPtr, IDPtr, IdxPtr, ModelPtr, power_grid_core as pgc
38
+ from power_grid_model._core.typing import ComponentAttributeMapping
39
+
40
+
41
+ class PowerGridModel:
42
+ """
43
+ Main class for Power Grid Model
44
+ """
45
+
46
+ _model_ptr: ModelPtr
47
+ _all_component_count: dict[ComponentType, int] | None
48
+ _batch_error: PowerGridBatchError | None
49
+
50
+ @property
51
+ def batch_error(self) -> PowerGridBatchError | None:
52
+ """
53
+ Get the batch error object, if present, after a batch calculation with errors.
54
+
55
+ Also works when continue_on_batch_error was set to True during the calculation.
56
+
57
+ Returns:
58
+ Batch error object, or None
59
+ """
60
+ return self._batch_error
61
+
62
+ @property
63
+ def _model(self):
64
+ if not self._model_ptr:
65
+ raise TypeError("You have an empty instance of PowerGridModel!")
66
+ return self._model_ptr
67
+
68
+ @property
69
+ def all_component_count(self) -> dict[ComponentType, int]:
70
+ """
71
+ Get amount of elements per component type.
72
+ If the count for a component type is zero, it will not be in the returned dictionary.
73
+
74
+ Returns:
75
+ A dictionary with
76
+
77
+ - key: Component type name
78
+ - value: Integer count of elements of this type
79
+ """
80
+ if self._all_component_count is None:
81
+ raise TypeError("You have an empty instance of PowerGridModel!")
82
+ return self._all_component_count
83
+
84
+ def copy(self) -> "PowerGridModel":
85
+ """
86
+ Copy the current model
87
+
88
+ Returns:
89
+ A copy of PowerGridModel
90
+ """
91
+ new_model = PowerGridModel.__new__(PowerGridModel)
92
+ new_model._model_ptr = pgc.copy_model(self._model)
93
+ assert_no_error()
94
+ new_model._all_component_count = self._all_component_count
95
+ return new_model
96
+
97
+ def __copy__(self):
98
+ return self.copy()
99
+
100
+ def __new__(cls, *_args, **_kwargs):
101
+ instance = super().__new__(cls)
102
+ instance._model_ptr = ModelPtr()
103
+ instance._all_component_count = None
104
+ return instance
105
+
106
+ def __init__(self, input_data: SingleDataset, system_frequency: float = 50.0):
107
+ """
108
+ Initialize the model from an input data set.
109
+
110
+ Args:
111
+ input_data: Input data dictionary
112
+
113
+ - key: Component type
114
+ - value: Component data with the correct type :class:`SingleComponentData`
115
+
116
+ system_frequency: Frequency of the power system, default 50 Hz
117
+ """
118
+ # destroy old instance
119
+ pgc.destroy_model(self._model_ptr)
120
+ self._all_component_count = None
121
+ # create new
122
+ prepared_input = prepare_input_view(_map_to_component_types(input_data))
123
+ self._model_ptr = pgc.create_model(system_frequency, input_data=prepared_input.get_dataset_ptr())
124
+ assert_no_error()
125
+ self._all_component_count = {k: v for k, v in prepared_input.get_info().total_elements().items() if v > 0}
126
+
127
+ def update(self, *, update_data: Dataset):
128
+ """
129
+ Update the model with changes.
130
+
131
+ The model will be in an invalid state if the update fails and should be discarded.
132
+
133
+ Args:
134
+ update_data: Update data dictionary
135
+
136
+ - key: Component type
137
+ - value: Component data with the correct type :class:`ComponentData` (single scenario or batch)
138
+
139
+ Raises:
140
+ PowerGridError if the update fails. The model is left in an invalid state and should be discarded.
141
+
142
+ Returns:
143
+ None
144
+ """
145
+ prepared_update = prepare_update_view(_map_to_component_types(update_data))
146
+ pgc.update_model(self._model, prepared_update.get_dataset_ptr())
147
+ assert_no_error()
148
+
149
+ def get_indexer(self, component_type: ComponentTypeLike, ids: np.ndarray):
150
+ """
151
+ Get array of indexers given array of ids for component type.
152
+
153
+ This enables syntax like input_data[ComponentType.node][get_indexer(ids)]
154
+
155
+ Args:
156
+ component_type: Type of component
157
+ ids: Array of ids
158
+
159
+ Returns:
160
+ Array of indexers, same shape as input array ids
161
+ """
162
+ component_type = _str_to_component_type(component_type)
163
+ ids_c = np.ascontiguousarray(ids, dtype=IdNp).ctypes.data_as(IDPtr)
164
+ indexer = np.empty_like(ids, dtype=IdxNp, order="C")
165
+ indexer_c = indexer.ctypes.data_as(IdxPtr)
166
+ size = ids.size
167
+ # call c function
168
+ pgc.get_indexer(self._model, component_type, size, ids_c, indexer_c)
169
+ assert_no_error()
170
+ return indexer
171
+
172
+ def _get_output_component_count(self, calculation_type: CalculationType):
173
+ exclude_types = {
174
+ CalculationType.power_flow: [
175
+ ComponentType.sym_voltage_sensor,
176
+ ComponentType.asym_voltage_sensor,
177
+ ComponentType.sym_power_sensor,
178
+ ComponentType.asym_power_sensor,
179
+ ComponentType.fault,
180
+ ],
181
+ CalculationType.state_estimation: [ComponentType.fault],
182
+ CalculationType.short_circuit: [
183
+ ComponentType.sym_voltage_sensor,
184
+ ComponentType.asym_voltage_sensor,
185
+ ComponentType.sym_power_sensor,
186
+ ComponentType.asym_power_sensor,
187
+ ],
188
+ }.get(calculation_type, [])
189
+
190
+ def include_type(component_type: ComponentType):
191
+ return all(exclude_type.value not in component_type.value for exclude_type in exclude_types)
192
+
193
+ return {ComponentType[k]: v for k, v in self.all_component_count.items() if include_type(k)}
194
+
195
+ def _construct_output(
196
+ self,
197
+ output_component_types: ComponentAttributeMapping,
198
+ calculation_type: CalculationType,
199
+ symmetric: bool,
200
+ is_batch: bool,
201
+ batch_size: int,
202
+ ) -> dict[ComponentType, np.ndarray]:
203
+ all_component_count = self._get_output_component_count(calculation_type=calculation_type)
204
+ return create_output_data(
205
+ output_component_types=output_component_types,
206
+ output_type=get_output_type(calculation_type=calculation_type, symmetric=symmetric),
207
+ all_component_count=all_component_count,
208
+ is_batch=is_batch,
209
+ batch_size=batch_size,
210
+ )
211
+
212
+ @staticmethod
213
+ def _options(**kwargs) -> Options:
214
+ def as_enum_value(key_enum: str, type_: type[IntEnum]):
215
+ if key_enum in kwargs:
216
+ value_enum = kwargs[key_enum]
217
+ if isinstance(value_enum, str):
218
+ kwargs[key_enum] = type_[value_enum]
219
+
220
+ as_enum_value("calculation_method", CalculationMethod)
221
+ as_enum_value("tap_changing_strategy", TapChangingStrategy)
222
+ as_enum_value("short_circuit_voltage_scaling", ShortCircuitVoltageScaling)
223
+ as_enum_value("experimental_features", _ExperimentalFeatures)
224
+
225
+ opt = Options()
226
+ for key, value in kwargs.items():
227
+ setattr(opt, key, value.value if isinstance(value, IntEnum) else value)
228
+ return opt
229
+
230
+ def _handle_errors(self, continue_on_batch_error: bool, batch_size: int, decode_error: bool):
231
+ self._batch_error = handle_errors(
232
+ continue_on_batch_error=continue_on_batch_error,
233
+ batch_size=batch_size,
234
+ decode_error=decode_error,
235
+ )
236
+
237
+ def _calculate_impl( # noqa: PLR0913
238
+ self,
239
+ calculation_type: CalculationType,
240
+ symmetric: bool,
241
+ update_data: Dataset | None,
242
+ output_component_types: ComponentAttributeMapping,
243
+ options: Options,
244
+ continue_on_batch_error: bool,
245
+ decode_error: bool,
246
+ experimental_features: _ExperimentalFeatures | str, # NOSONAR # noqa: ARG002
247
+ ):
248
+ """
249
+ Core calculation routine
250
+
251
+ Args:
252
+ calculation_type:
253
+ symmetric:
254
+ update_data:
255
+ output_component_types:
256
+ options:
257
+ continue_on_batch_error:
258
+ decode_error:
259
+
260
+ Returns:
261
+ """
262
+ self._batch_error = None
263
+ is_batch = update_data is not None
264
+
265
+ if update_data is not None:
266
+ prepared_update = prepare_update_view(update_data)
267
+ update_ptr = prepared_update.get_dataset_ptr()
268
+ batch_size = prepared_update.get_info().batch_size()
269
+ else:
270
+ update_ptr = ConstDatasetPtr()
271
+ batch_size = 1
272
+
273
+ output_data = self._construct_output(
274
+ output_component_types=output_component_types,
275
+ calculation_type=calculation_type,
276
+ symmetric=symmetric,
277
+ is_batch=is_batch,
278
+ batch_size=batch_size,
279
+ )
280
+ prepared_result = prepare_output_view(
281
+ output_data=output_data,
282
+ output_type=get_output_type(calculation_type=calculation_type, symmetric=symmetric),
283
+ )
284
+
285
+ # run calculation
286
+ pgc.calculate(
287
+ # model and options
288
+ self._model,
289
+ options.opt,
290
+ output_data=prepared_result.get_dataset_ptr(),
291
+ update_data=update_ptr,
292
+ )
293
+
294
+ self._handle_errors(
295
+ continue_on_batch_error=continue_on_batch_error,
296
+ batch_size=batch_size,
297
+ decode_error=decode_error,
298
+ )
299
+
300
+ return output_data
301
+
302
+ def _calculate_power_flow( # noqa: PLR0913
303
+ self,
304
+ *,
305
+ symmetric: bool = True,
306
+ error_tolerance: float = 1e-8,
307
+ max_iterations: int = 20,
308
+ calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson,
309
+ update_data: Dataset | None = None,
310
+ threading: int = -1,
311
+ output_component_types: ComponentAttributeMapping = None,
312
+ continue_on_batch_error: bool = False,
313
+ decode_error: bool = True,
314
+ tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled,
315
+ experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
316
+ ):
317
+ calculation_type = CalculationType.power_flow
318
+ options = self._options(
319
+ calculation_type=calculation_type,
320
+ symmetric=symmetric,
321
+ error_tolerance=error_tolerance,
322
+ max_iterations=max_iterations,
323
+ calculation_method=calculation_method,
324
+ tap_changing_strategy=tap_changing_strategy,
325
+ threading=threading,
326
+ experimental_features=experimental_features,
327
+ )
328
+ return self._calculate_impl(
329
+ calculation_type=calculation_type,
330
+ symmetric=symmetric,
331
+ update_data=update_data,
332
+ output_component_types=output_component_types,
333
+ options=options,
334
+ continue_on_batch_error=continue_on_batch_error,
335
+ decode_error=decode_error,
336
+ experimental_features=experimental_features,
337
+ )
338
+
339
+ def _calculate_state_estimation( # noqa: PLR0913
340
+ self,
341
+ *,
342
+ symmetric: bool = True,
343
+ error_tolerance: float = 1e-8,
344
+ max_iterations: int = 20,
345
+ calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear,
346
+ update_data: Dataset | None = None,
347
+ threading: int = -1,
348
+ output_component_types: ComponentAttributeMapping = None,
349
+ continue_on_batch_error: bool = False,
350
+ decode_error: bool = True,
351
+ experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
352
+ ) -> dict[ComponentType, np.ndarray]:
353
+ calculation_type = CalculationType.state_estimation
354
+ options = self._options(
355
+ calculation_type=calculation_type,
356
+ symmetric=symmetric,
357
+ error_tolerance=error_tolerance,
358
+ max_iterations=max_iterations,
359
+ calculation_method=calculation_method,
360
+ threading=threading,
361
+ experimental_features=experimental_features,
362
+ )
363
+ return self._calculate_impl(
364
+ calculation_type=calculation_type,
365
+ symmetric=symmetric,
366
+ update_data=update_data,
367
+ output_component_types=output_component_types,
368
+ options=options,
369
+ continue_on_batch_error=continue_on_batch_error,
370
+ decode_error=decode_error,
371
+ experimental_features=experimental_features,
372
+ )
373
+
374
+ def _calculate_short_circuit( # noqa: PLR0913
375
+ self,
376
+ *,
377
+ calculation_method: CalculationMethod | str = CalculationMethod.iec60909,
378
+ update_data: Dataset | None = None,
379
+ threading: int = -1,
380
+ output_component_types: ComponentAttributeMapping = None,
381
+ continue_on_batch_error: bool = False,
382
+ decode_error: bool = True,
383
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum,
384
+ experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
385
+ ) -> dict[ComponentType, np.ndarray]:
386
+ calculation_type = CalculationType.short_circuit
387
+ symmetric = False
388
+
389
+ options = self._options(
390
+ calculation_type=calculation_type,
391
+ symmetric=symmetric,
392
+ calculation_method=calculation_method,
393
+ threading=threading,
394
+ short_circuit_voltage_scaling=short_circuit_voltage_scaling,
395
+ experimental_features=experimental_features,
396
+ )
397
+ return self._calculate_impl(
398
+ calculation_type=calculation_type,
399
+ symmetric=symmetric,
400
+ update_data=update_data,
401
+ output_component_types=output_component_types,
402
+ options=options,
403
+ continue_on_batch_error=continue_on_batch_error,
404
+ decode_error=decode_error,
405
+ experimental_features=experimental_features,
406
+ )
407
+
408
+ def calculate_power_flow( # noqa: PLR0913
409
+ self,
410
+ *,
411
+ symmetric: bool = True,
412
+ error_tolerance: float = 1e-8,
413
+ max_iterations: int = 20,
414
+ calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson,
415
+ update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
416
+ threading: int = -1,
417
+ output_component_types: ComponentAttributeMapping = None,
418
+ continue_on_batch_error: bool = False,
419
+ decode_error: bool = True,
420
+ tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled,
421
+ ) -> dict[ComponentType, np.ndarray]:
422
+ """
423
+ Calculate power flow once with the current model attributes.
424
+ Or calculate in batch with the given update dataset in batch.
425
+
426
+ Args:
427
+ symmetric (bool, optional): Whether to perform a three-phase symmetric calculation.
428
+
429
+ - True: Three-phase symmetric calculation, even for asymmetric loads/generations (Default).
430
+ - False: Three-phase asymmetric calculation.
431
+ error_tolerance (float, optional): Error tolerance for voltage in p.u., applicable only when the
432
+ calculation method is iterative.
433
+ max_iterations (int, optional): Maximum number of iterations, applicable only when the calculation method
434
+ is iterative.
435
+ calculation_method (an enumeration or string): The calculation method to use.
436
+
437
+ - newton_raphson: Use Newton-Raphson iterative method (default).
438
+ - linear: Use linear method.
439
+ update_data (dict, optional):
440
+ None: Calculate power flow once with the current model attributes.
441
+ Or a dictionary for batch calculation with batch update.
442
+
443
+ - key: Component type name to be updated in batch.
444
+ - value:
445
+
446
+ - For homogeneous update batch (a 2D numpy structured array):
447
+
448
+ - Dimension 0: Each batch.
449
+ - Dimension 1: Each updated element per batch for this component type.
450
+ - For inhomogeneous update batch (a dictionary containing two keys):
451
+
452
+ - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
453
+ update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
454
+ compressed sparse structure.
455
+ https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
456
+ - data: 1D numpy structured array in flat.
457
+ threading (int, optional): Applicable only for batch calculation.
458
+
459
+ - < 0: Sequential
460
+ - = 0: Parallel, use number of hardware threads
461
+ - > 0: Specify number of parallel threads
462
+ output_component_types (ComponentAttributeMapping):
463
+
464
+ - None: Row based data for all component types.
465
+ - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
466
+ - ComponentAttributeFilterOptions: Columnar data for all component types.
467
+ - dict[ComponentType, set[str] | list[str] | None | ComponentAttributeFilterOptions]:
468
+ key: ComponentType
469
+ value:
470
+ - None: Row based data for the specified component types.
471
+ - ComponentAttributeFilterOptions: Columnar data for the specified component types.
472
+ - set[str] | list[str]: Columnar data for the specified component types and attributes.
473
+ continue_on_batch_error (bool, optional):
474
+ Continue the program (instead of throwing error) if some scenarios fail.
475
+ You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
476
+ decode_error (bool, optional):
477
+ Decode error messages to their derived types if possible.
478
+
479
+ Returns:
480
+ Dictionary of results of all components.
481
+
482
+ - key: Component type name to be updated in batch.
483
+ - value:
484
+
485
+ - For single calculation: 1D numpy structured array for the results of this component type.
486
+ - For batch calculation: 2D numpy structured array for the results of this component type.
487
+
488
+ - Dimension 0: Each batch.
489
+ - Dimension 1: The result of each element for this component type.
490
+
491
+ Raises:
492
+ Exception: In case an error in the core occurs, an exception will be thrown.
493
+ """
494
+ return self._calculate_power_flow(
495
+ symmetric=symmetric,
496
+ error_tolerance=error_tolerance,
497
+ max_iterations=max_iterations,
498
+ calculation_method=calculation_method,
499
+ update_data=(_map_to_component_types(update_data) if update_data is not None else None),
500
+ threading=threading,
501
+ output_component_types=output_component_types,
502
+ continue_on_batch_error=continue_on_batch_error,
503
+ decode_error=decode_error,
504
+ tap_changing_strategy=tap_changing_strategy,
505
+ )
506
+
507
+ def calculate_state_estimation( # noqa: PLR0913
508
+ self,
509
+ *,
510
+ symmetric: bool = True,
511
+ error_tolerance: float = 1e-8,
512
+ max_iterations: int = 20,
513
+ calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear,
514
+ update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
515
+ threading: int = -1,
516
+ output_component_types: ComponentAttributeMapping = None,
517
+ continue_on_batch_error: bool = False,
518
+ decode_error: bool = True,
519
+ ) -> dict[ComponentType, np.ndarray]:
520
+ """
521
+ Calculate state estimation once with the current model attributes.
522
+ Or calculate in batch with the given update dataset in batch.
523
+
524
+ Args:
525
+ symmetric (bool, optional): Whether to perform a three-phase symmetric calculation.
526
+
527
+ - True: Three-phase symmetric calculation, even for asymmetric loads/generations (Default).
528
+ - False: Three-phase asymmetric calculation.
529
+ error_tolerance (float, optional): error tolerance for voltage in p.u., only applicable when the
530
+ calculation method is iterative.
531
+ max_iterations (int, optional): Maximum number of iterations, applicable only when the calculation method
532
+ is iterative.
533
+ calculation_method (an enumeration): Use iterative linear method.
534
+ update_data (dict, optional):
535
+ None: Calculate state estimation once with the current model attributes.
536
+ Or a dictionary for batch calculation with batch update.
537
+
538
+ - key: Component type name to be updated in batch.
539
+ - value:
540
+
541
+ - For homogeneous update batch (a 2D numpy structured array):
542
+
543
+ - Dimension 0: Each batch.
544
+ - Dimension 1: Each updated element per batch for this component type.
545
+ - For inhomogeneous update batch (a dictionary containing two keys):
546
+
547
+ - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
548
+ update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
549
+ compressed sparse structure.
550
+ https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
551
+ - data: 1D numpy structured array in flat.
552
+ threading (int, optional): Applicable only for batch calculation.
553
+
554
+ - < 0: Sequential
555
+ - = 0: Parallel, use number of hardware threads
556
+ - > 0: Specify number of parallel threads
557
+ output_component_types (ComponentAttributeMapping):
558
+
559
+ - None: Row based data for all component types.
560
+ - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
561
+ - ComponentAttributeFilterOptions: Columnar data for all component types.
562
+ - dict[ComponentType, set[str] | list[str] | None | ComponentAttributeFilterOptions]:
563
+ key: ComponentType
564
+ value:
565
+ - None: Row based data for the specified component types.
566
+ - ComponentAttributeFilterOptions: Columnar data for the specified component types.
567
+ - set[str] | list[str]: Columnar data for the specified component types and attributes.
568
+ continue_on_batch_error (bool, optional):
569
+ Continue the program (instead of throwing error) if some scenarios fail.
570
+ You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
571
+ decode_error (bool, optional):
572
+ Decode error messages to their derived types if possible.
573
+
574
+ Returns:
575
+ Dictionary of results of all components.
576
+
577
+ - key: Component type name to be updated in batch.
578
+ - value:
579
+
580
+ - For single calculation: 1D numpy structured array for the results of this component type.
581
+ - For batch calculation: 2D numpy structured array for the results of this component type.
582
+
583
+ - Dimension 0: Each batch.
584
+ - Dimension 1: The result of each element for this component type.
585
+
586
+ Raises:
587
+ Exception: In case an error in the core occurs, an exception will be thrown.
588
+ """
589
+ return self._calculate_state_estimation(
590
+ symmetric=symmetric,
591
+ error_tolerance=error_tolerance,
592
+ max_iterations=max_iterations,
593
+ calculation_method=calculation_method,
594
+ update_data=(_map_to_component_types(update_data) if update_data is not None else None),
595
+ threading=threading,
596
+ output_component_types=output_component_types,
597
+ continue_on_batch_error=continue_on_batch_error,
598
+ decode_error=decode_error,
599
+ )
600
+
601
+ def calculate_short_circuit( # noqa: PLR0913
602
+ self,
603
+ *,
604
+ calculation_method: CalculationMethod | str = CalculationMethod.iec60909,
605
+ update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
606
+ threading: int = -1,
607
+ output_component_types: ComponentAttributeMapping = None,
608
+ continue_on_batch_error: bool = False,
609
+ decode_error: bool = True,
610
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum,
611
+ ) -> dict[ComponentType, np.ndarray]:
612
+ """
613
+ Calculate a short circuit once with the current model attributes.
614
+ Or calculate in batch with the given update dataset in batch
615
+
616
+ Args:
617
+ calculation_method (an enumeration): Use the iec60909 standard.
618
+ update_data:
619
+ None: calculate a short circuit once with the current model attributes.
620
+ Or a dictionary for batch calculation with batch update
621
+
622
+ - key: Component type name to be updated in batch
623
+ - value:
624
+
625
+ - For homogeneous update batch (a 2D numpy structured array):
626
+
627
+ - Dimension 0: each batch
628
+ - Dimension 1: each updated element per batch for this component type
629
+ - For inhomogeneous update batch (a dictionary containing two keys):
630
+
631
+ - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
632
+ update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
633
+ compressed sparse structure.
634
+ https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
635
+ - data: 1D numpy structured array in flat.
636
+ threading (int, optional): Applicable only for batch calculation.
637
+
638
+ - < 0: Sequential
639
+ - = 0: Parallel, use number of hardware threads
640
+ - > 0: Specify number of parallel threads
641
+ output_component_types (ComponentAttributeMapping):
642
+
643
+ - None: Row based data for all component types.
644
+ - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
645
+ - ComponentAttributeFilterOptions: Columnar data for all component types.
646
+ - dict[ComponentType, set[str] | list[str] | None | ComponentAttributeFilterOptions]:
647
+ key: ComponentType
648
+ value:
649
+ - None: Row based data for the specified component types.
650
+ - ComponentAttributeFilterOptions: Columnar data for the specified component types.
651
+ - set[str] | list[str]: Columnar data for the specified component types and attributes.
652
+ continue_on_batch_error (bool, optional):
653
+ Continue the program (instead of throwing error) if some scenarios fail.
654
+ You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
655
+ decode_error (bool, optional):
656
+ Decode error messages to their derived types if possible.
657
+ short_circuit_voltage_scaling ({ShortCircuitVoltageSaling, str}, optional):
658
+ Whether to use the maximum or minimum voltage scaling.
659
+ By default, the maximum voltage scaling is used to calculate the short circuit.
660
+
661
+ Returns:
662
+ Dictionary of results of all components.
663
+
664
+ - key: Component type name to be updated in batch.
665
+ - value:
666
+
667
+ - For single calculation: 1D numpy structured array for the results of this component type.
668
+ - For batch calculation: 2D numpy structured array for the results of this component type.
669
+
670
+ - Dimension 0: Each batch.
671
+ - Dimension 1: The result of each element for this component type.
672
+ Raises:
673
+ Exception: In case an error in the core occurs, an exception will be thrown.
674
+ """
675
+ return self._calculate_short_circuit(
676
+ calculation_method=calculation_method,
677
+ update_data=(_map_to_component_types(update_data) if update_data is not None else None),
678
+ threading=threading,
679
+ output_component_types=output_component_types,
680
+ continue_on_batch_error=continue_on_batch_error,
681
+ decode_error=decode_error,
682
+ short_circuit_voltage_scaling=short_circuit_voltage_scaling,
683
+ )
684
+
685
+ def __del__(self):
686
+ pgc.destroy_model(self._model_ptr)
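
For readers unfamiliar with the API diffed above, the sketch below shows the typical calling pattern of PowerGridModel.calculate_power_flow, including a batch run via update_data as described in the docstrings. It is a minimal illustration and not part of this release: the initialize_array helper, the LoadGenType enum, and the component attribute names (u_rated, u_ref, p_specified, ...) are taken from the power-grid-model documentation and are assumptions here rather than content of this diff.

# Minimal power-flow sketch against the PowerGridModel API shown above.
# Assumption: initialize_array, LoadGenType and the attribute names below
# follow the public power-grid-model documentation; they are not part of this diff.
from power_grid_model import ComponentType, LoadGenType, PowerGridModel, initialize_array

# One node with a source and a symmetric load attached to it.
node = initialize_array("input", "node", 1)
node["id"] = [1]
node["u_rated"] = [10.5e3]  # rated line-to-line voltage in V

source = initialize_array("input", "source", 1)
source["id"] = [2]
source["node"] = [1]
source["status"] = [1]
source["u_ref"] = [1.0]  # reference voltage in p.u.

sym_load = initialize_array("input", "sym_load", 1)
sym_load["id"] = [3]
sym_load["node"] = [1]
sym_load["status"] = [1]
sym_load["type"] = [LoadGenType.const_power]
sym_load["p_specified"] = [1e6]  # W
sym_load["q_specified"] = [2e5]  # var

input_data = {
    ComponentType.node: node,
    ComponentType.source: source,
    ComponentType.sym_load: sym_load,
}

model = PowerGridModel(input_data, system_frequency=50.0)

# Single calculation with the current model attributes.
result = model.calculate_power_flow(symmetric=True, error_tolerance=1e-8, max_iterations=20)
print(result[ComponentType.node]["u_pu"])

# Batch calculation: one homogeneous update batch (dimension 0 = scenario,
# dimension 1 = updated elements per scenario), as described in the docstring above.
n_scenarios = 3
load_update = initialize_array("update", "sym_load", (n_scenarios, 1))
load_update["id"] = [[3]] * n_scenarios
load_update["p_specified"] = [[0.8e6], [1.0e6], [1.2e6]]

batch_result = model.calculate_power_flow(
    update_data={ComponentType.sym_load: load_update},
    threading=-1,                  # sequential; 0 = use all hardware threads
    continue_on_batch_error=True,  # failed scenarios reported via model.batch_error
)
print(batch_result[ComponentType.node]["u_pu"].shape)  # (n_scenarios, n_nodes)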