power-grid-model 1.10.17 → 1.12.119 (py3-none-win_amd64.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of power-grid-model might be problematic.

Files changed (67)
  1. power_grid_model/__init__.py +54 -29
  2. power_grid_model/_core/__init__.py +3 -3
  3. power_grid_model/_core/buffer_handling.py +507 -478
  4. power_grid_model/_core/data_handling.py +195 -141
  5. power_grid_model/_core/data_types.py +142 -0
  6. power_grid_model/_core/dataset_definitions.py +109 -109
  7. power_grid_model/_core/enum.py +226 -0
  8. power_grid_model/_core/error_handling.py +215 -198
  9. power_grid_model/_core/errors.py +134 -0
  10. power_grid_model/_core/index_integer.py +17 -17
  11. power_grid_model/_core/options.py +71 -69
  12. power_grid_model/_core/power_grid_core.py +577 -562
  13. power_grid_model/_core/power_grid_dataset.py +545 -490
  14. power_grid_model/_core/power_grid_meta.py +262 -244
  15. power_grid_model/_core/power_grid_model.py +1025 -687
  16. power_grid_model/_core/power_grid_model_c/__init__.py +3 -0
  17. power_grid_model/_core/power_grid_model_c/bin/power_grid_model_c.dll +0 -0
  18. power_grid_model/_core/power_grid_model_c/get_pgm_dll_path.py +63 -0
  19. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/basics.h +251 -0
  20. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/buffer.h +108 -0
  21. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/dataset.h +332 -0
  22. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/dataset_definitions.h +1060 -0
  23. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/handle.h +111 -0
  24. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/meta_data.h +189 -0
  25. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/model.h +130 -0
  26. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/options.h +142 -0
  27. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/serialization.h +118 -0
  28. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c.h +36 -0
  29. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/basics.hpp +65 -0
  30. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/buffer.hpp +61 -0
  31. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/dataset.hpp +224 -0
  32. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/handle.hpp +108 -0
  33. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/meta_data.hpp +84 -0
  34. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/model.hpp +63 -0
  35. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/options.hpp +52 -0
  36. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/serialization.hpp +124 -0
  37. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/utils.hpp +81 -0
  38. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp.hpp +19 -0
  39. power_grid_model/_core/power_grid_model_c/lib/cmake/power_grid_model/power_grid_modelConfig.cmake +37 -0
  40. power_grid_model/_core/power_grid_model_c/lib/cmake/power_grid_model/power_grid_modelConfigVersion.cmake +65 -0
  41. power_grid_model/_core/power_grid_model_c/lib/cmake/power_grid_model/power_grid_modelTargets-release.cmake +19 -0
  42. power_grid_model/_core/power_grid_model_c/lib/cmake/power_grid_model/power_grid_modelTargets.cmake +144 -0
  43. power_grid_model/_core/power_grid_model_c/lib/power_grid_model_c.lib +0 -0
  44. power_grid_model/_core/power_grid_model_c/share/LICENSE +292 -0
  45. power_grid_model/_core/power_grid_model_c/share/README.md +15 -0
  46. power_grid_model/_core/serialization.py +319 -317
  47. power_grid_model/_core/typing.py +20 -0
  48. power_grid_model/{_utils.py → _core/utils.py} +798 -783
  49. power_grid_model/data_types.py +321 -319
  50. power_grid_model/enum.py +27 -214
  51. power_grid_model/errors.py +37 -119
  52. power_grid_model/typing.py +43 -48
  53. power_grid_model/utils.py +529 -400
  54. power_grid_model/validation/__init__.py +25 -10
  55. power_grid_model/validation/{rules.py → _rules.py} +1167 -962
  56. power_grid_model/validation/{validation.py → _validation.py} +1172 -1015
  57. power_grid_model/validation/assertions.py +93 -92
  58. power_grid_model/validation/errors.py +602 -524
  59. power_grid_model/validation/utils.py +313 -318
  60. {power_grid_model-1.10.17.dist-info → power_grid_model-1.12.119.dist-info}/METADATA +162 -165
  61. power_grid_model-1.12.119.dist-info/RECORD +65 -0
  62. {power_grid_model-1.10.17.dist-info → power_grid_model-1.12.119.dist-info}/WHEEL +1 -1
  63. power_grid_model-1.12.119.dist-info/entry_points.txt +3 -0
  64. power_grid_model/_core/_power_grid_core.dll +0 -0
  65. power_grid_model-1.10.17.dist-info/RECORD +0 -32
  66. power_grid_model-1.10.17.dist-info/top_level.txt +0 -1
  67. {power_grid_model-1.10.17.dist-info → power_grid_model-1.12.119.dist-info/licenses}/LICENSE +0 -0
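Several files in this list move under power_grid_model._core or gain a leading underscore (_utils.py → _core/utils.py, validation/rules.py → _rules.py, validation/validation.py → _validation.py), while thin top-level modules such as enum.py and errors.py shrink to what look like re-export shims. A minimal sketch, assuming the documented top-level re-exports remain the supported import path (the exact export list is not shown in this diff):

    # Sketch only: the public import surface that the restructuring appears to
    # preserve. Whether every name is re-exported is an assumption, not
    # something this diff confirms.
    from power_grid_model import (
        CalculationMethod,
        ComponentType,
        PowerGridModel,
        initialize_array,
    )
    from power_grid_model.validation import validate_input_data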
@@ -1,687 +1,1025 @@
1
- # SPDX-FileCopyrightText: Contributors to the Power Grid Model project <powergridmodel@lfenergy.org>
2
- #
3
- # SPDX-License-Identifier: MPL-2.0
4
-
5
- """
6
- Main power grid model class
7
- """
8
-
9
- from enum import IntEnum
10
- from typing import Type
11
-
12
- import numpy as np
13
-
14
- from power_grid_model._core.data_handling import (
15
- create_output_data,
16
- get_output_type,
17
- prepare_input_view,
18
- prepare_output_view,
19
- prepare_update_view,
20
- )
21
- from power_grid_model._core.dataset_definitions import (
22
- ComponentType,
23
- ComponentTypeLike,
24
- _map_to_component_types,
25
- _str_to_component_type,
26
- )
27
- from power_grid_model._core.error_handling import PowerGridBatchError, assert_no_error, handle_errors
28
- from power_grid_model._core.index_integer import IdNp, IdxNp
29
- from power_grid_model._core.options import Options
30
- from power_grid_model._core.power_grid_core import ConstDatasetPtr, IDPtr, IdxPtr, ModelPtr, power_grid_core as pgc
31
- from power_grid_model.data_types import Dataset, SingleDataset
32
- from power_grid_model.enum import (
33
- CalculationMethod,
34
- CalculationType,
35
- ShortCircuitVoltageScaling,
36
- TapChangingStrategy,
37
- _ExperimentalFeatures,
38
- )
39
- from power_grid_model.typing import ComponentAttributeMapping
40
-
41
-
42
- class PowerGridModel:
43
- """
44
- Main class for Power Grid Model
45
- """
46
-
47
- _model_ptr: ModelPtr
48
- _all_component_count: dict[ComponentType, int] | None
49
- _batch_error: PowerGridBatchError | None
50
-
51
- @property
52
- def batch_error(self) -> PowerGridBatchError | None:
53
- """
54
- Get the batch error object, if present
55
-
56
- Returns:
57
- Batch error object, or None
58
- """
59
- return self._batch_error
60
-
61
- @property
62
- def _model(self):
63
- if not self._model_ptr:
64
- raise TypeError("You have an empty instance of PowerGridModel!")
65
- return self._model_ptr
66
-
67
- @property
68
- def all_component_count(self) -> dict[ComponentType, int]:
69
- """
70
- Get amount of elements per component type.
71
- If the count for a component type is zero, it will not be in the returned dictionary.
72
-
73
- Returns:
74
- A dictionary with
75
-
76
- - key: Component type name
77
- - value: Integer count of elements of this type
78
- """
79
- if self._all_component_count is None:
80
- raise TypeError("You have an empty instance of PowerGridModel!")
81
- return self._all_component_count
82
-
83
- def copy(self) -> "PowerGridModel":
84
- """
85
- Copy the current model
86
-
87
- Returns:
88
- A copy of PowerGridModel
89
- """
90
- new_model = PowerGridModel.__new__(PowerGridModel)
91
- new_model._model_ptr = pgc.copy_model(self._model) # pylint: disable=W0212
92
- assert_no_error()
93
- new_model._all_component_count = self._all_component_count # pylint: disable=W0212
94
- return new_model
95
-
96
- def __copy__(self):
97
- return self.copy()
98
-
99
- def __new__(cls, *_args, **_kwargs):
100
- instance = super().__new__(cls)
101
- instance._model_ptr = ModelPtr()
102
- instance._all_component_count = None
103
- return instance
104
-
105
- def __init__(self, input_data: SingleDataset, system_frequency: float = 50.0):
106
- """
107
- Initialize the model from an input data set.
108
-
109
- Args:
110
- input_data: Input data dictionary
111
-
112
- - key: Component type
113
- - value: Component data with the correct type :class:`SingleComponentData`
114
-
115
- system_frequency: Frequency of the power system, default 50 Hz
116
- """
117
- # destroy old instance
118
- pgc.destroy_model(self._model_ptr)
119
- self._all_component_count = None
120
- # create new
121
- prepared_input = prepare_input_view(_map_to_component_types(input_data))
122
- self._model_ptr = pgc.create_model(system_frequency, input_data=prepared_input.get_dataset_ptr())
123
- assert_no_error()
124
- self._all_component_count = {k: v for k, v in prepared_input.get_info().total_elements().items() if v > 0}
125
-
126
- def update(self, *, update_data: Dataset):
127
- """
128
- Update the model with changes.
129
-
130
- The model will be in an invalid state if the update fails and should be discarded.
131
-
132
- Args:
133
- update_data: Update data dictionary
134
-
135
- - key: Component type
136
- - value: Component data with the correct type :class:`ComponentData` (single scenario or batch)
137
-
138
- Raises:
139
- PowerGridError if the update fails. The model is left in an invalid state and should be discarded.
140
-
141
- Returns:
142
- None
143
- """
144
- prepared_update = prepare_update_view(_map_to_component_types(update_data))
145
- pgc.update_model(self._model, prepared_update.get_dataset_ptr())
146
- assert_no_error()
147
-
148
- def get_indexer(self, component_type: ComponentTypeLike, ids: np.ndarray):
149
- """
150
- Get array of indexers given array of ids for component type.
151
-
152
- This enables syntax like input_data[ComponentType.node][get_indexer(ids)]
153
-
154
- Args:
155
- component_type: Type of component
156
- ids: Array of ids
157
-
158
- Returns:
159
- Array of indexers, same shape as input array ids
160
- """
161
- component_type = _str_to_component_type(component_type)
162
- ids_c = np.ascontiguousarray(ids, dtype=IdNp).ctypes.data_as(IDPtr)
163
- indexer = np.empty_like(ids, dtype=IdxNp, order="C")
164
- indexer_c = indexer.ctypes.data_as(IdxPtr)
165
- size = ids.size
166
- # call c function
167
- pgc.get_indexer(self._model, component_type, size, ids_c, indexer_c)
168
- assert_no_error()
169
- return indexer
170
-
171
- def _get_output_component_count(self, calculation_type: CalculationType):
172
- exclude_types = {
173
- CalculationType.power_flow: [
174
- ComponentType.sym_voltage_sensor,
175
- ComponentType.asym_voltage_sensor,
176
- ComponentType.sym_power_sensor,
177
- ComponentType.asym_power_sensor,
178
- ComponentType.fault,
179
- ],
180
- CalculationType.state_estimation: [ComponentType.fault],
181
- CalculationType.short_circuit: [
182
- ComponentType.sym_voltage_sensor,
183
- ComponentType.asym_voltage_sensor,
184
- ComponentType.sym_power_sensor,
185
- ComponentType.asym_power_sensor,
186
- ],
187
- }.get(calculation_type, [])
188
-
189
- def include_type(component_type: ComponentType):
190
- for exclude_type in exclude_types:
191
- if exclude_type.value in component_type.value:
192
- return False
193
- return True
194
-
195
- return {ComponentType[k]: v for k, v in self.all_component_count.items() if include_type(k)}
196
-
197
- # pylint: disable=too-many-arguments
198
- def _construct_output( # pylint: disable=too-many-positional-arguments
199
- self,
200
- output_component_types: ComponentAttributeMapping,
201
- calculation_type: CalculationType,
202
- symmetric: bool,
203
- is_batch: bool,
204
- batch_size: int,
205
- ) -> dict[ComponentType, np.ndarray]:
206
- all_component_count = self._get_output_component_count(calculation_type=calculation_type)
207
- return create_output_data(
208
- output_component_types=output_component_types,
209
- output_type=get_output_type(calculation_type=calculation_type, symmetric=symmetric),
210
- all_component_count=all_component_count,
211
- is_batch=is_batch,
212
- batch_size=batch_size,
213
- )
214
-
215
- @staticmethod
216
- def _options(**kwargs) -> Options:
217
- def as_enum_value(key_enum: str, type_: Type[IntEnum]):
218
- if key_enum in kwargs:
219
- value_enum = kwargs[key_enum]
220
- if isinstance(value_enum, str):
221
- kwargs[key_enum] = type_[value_enum]
222
-
223
- as_enum_value("calculation_method", CalculationMethod)
224
- as_enum_value("tap_changing_strategy", TapChangingStrategy)
225
- as_enum_value("short_circuit_voltage_scaling", ShortCircuitVoltageScaling)
226
- as_enum_value("experimental_features", _ExperimentalFeatures)
227
-
228
- opt = Options()
229
- for key, value in kwargs.items():
230
- setattr(opt, key, value.value if isinstance(value, IntEnum) else value)
231
- return opt
232
-
233
- def _handle_errors(self, continue_on_batch_error: bool, batch_size: int, decode_error: bool):
234
- self._batch_error = handle_errors(
235
- continue_on_batch_error=continue_on_batch_error,
236
- batch_size=batch_size,
237
- decode_error=decode_error,
238
- )
239
-
240
- # pylint: disable=too-many-arguments
241
- def _calculate_impl( # pylint: disable=too-many-positional-arguments
242
- self,
243
- calculation_type: CalculationType,
244
- symmetric: bool,
245
- update_data: Dataset | None,
246
- output_component_types: ComponentAttributeMapping,
247
- options: Options,
248
- continue_on_batch_error: bool,
249
- decode_error: bool,
250
- experimental_features: _ExperimentalFeatures | str, # pylint: disable=too-many-arguments,unused-argument
251
- ):
252
- """
253
- Core calculation routine
254
-
255
- Args:
256
- calculation_type:
257
- symmetric:
258
- update_data:
259
- output_component_types:
260
- options:
261
- continue_on_batch_error:
262
- decode_error:
263
-
264
- Returns:
265
- """
266
- self._batch_error = None
267
- is_batch = update_data is not None
268
-
269
- if update_data is not None:
270
- prepared_update = prepare_update_view(update_data)
271
- update_ptr = prepared_update.get_dataset_ptr()
272
- batch_size = prepared_update.get_info().batch_size()
273
- else:
274
- update_ptr = ConstDatasetPtr()
275
- batch_size = 1
276
-
277
- output_data = self._construct_output(
278
- output_component_types=output_component_types,
279
- calculation_type=calculation_type,
280
- symmetric=symmetric,
281
- is_batch=is_batch,
282
- batch_size=batch_size,
283
- )
284
- prepared_result = prepare_output_view(
285
- output_data=output_data,
286
- output_type=get_output_type(calculation_type=calculation_type, symmetric=symmetric),
287
- )
288
-
289
- # run calculation
290
- pgc.calculate(
291
- # model and options
292
- self._model,
293
- options.opt,
294
- output_data=prepared_result.get_dataset_ptr(),
295
- update_data=update_ptr,
296
- )
297
-
298
- self._handle_errors(
299
- continue_on_batch_error=continue_on_batch_error,
300
- batch_size=batch_size,
301
- decode_error=decode_error,
302
- )
303
-
304
- return output_data
305
-
306
- def _calculate_power_flow(
307
- self,
308
- *,
309
- symmetric: bool = True,
310
- error_tolerance: float = 1e-8,
311
- max_iterations: int = 20,
312
- calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson,
313
- update_data: Dataset | None = None,
314
- threading: int = -1,
315
- output_component_types: ComponentAttributeMapping = None,
316
- continue_on_batch_error: bool = False,
317
- decode_error: bool = True,
318
- tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled,
319
- experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
320
- ):
321
- calculation_type = CalculationType.power_flow
322
- options = self._options(
323
- calculation_type=calculation_type,
324
- symmetric=symmetric,
325
- error_tolerance=error_tolerance,
326
- max_iterations=max_iterations,
327
- calculation_method=calculation_method,
328
- tap_changing_strategy=tap_changing_strategy,
329
- threading=threading,
330
- experimental_features=experimental_features,
331
- )
332
- return self._calculate_impl(
333
- calculation_type=calculation_type,
334
- symmetric=symmetric,
335
- update_data=update_data,
336
- output_component_types=output_component_types,
337
- options=options,
338
- continue_on_batch_error=continue_on_batch_error,
339
- decode_error=decode_error,
340
- experimental_features=experimental_features,
341
- )
342
-
343
- def _calculate_state_estimation(
344
- self,
345
- *,
346
- symmetric: bool = True,
347
- error_tolerance: float = 1e-8,
348
- max_iterations: int = 20,
349
- calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear,
350
- update_data: Dataset | None = None,
351
- threading: int = -1,
352
- output_component_types: ComponentAttributeMapping = None,
353
- continue_on_batch_error: bool = False,
354
- decode_error: bool = True,
355
- experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
356
- ) -> dict[ComponentType, np.ndarray]:
357
- calculation_type = CalculationType.state_estimation
358
- options = self._options(
359
- calculation_type=calculation_type,
360
- symmetric=symmetric,
361
- error_tolerance=error_tolerance,
362
- max_iterations=max_iterations,
363
- calculation_method=calculation_method,
364
- threading=threading,
365
- experimental_features=experimental_features,
366
- )
367
- return self._calculate_impl(
368
- calculation_type=calculation_type,
369
- symmetric=symmetric,
370
- update_data=update_data,
371
- output_component_types=output_component_types,
372
- options=options,
373
- continue_on_batch_error=continue_on_batch_error,
374
- decode_error=decode_error,
375
- experimental_features=experimental_features,
376
- )
377
-
378
- def _calculate_short_circuit(
379
- self,
380
- *,
381
- calculation_method: CalculationMethod | str = CalculationMethod.iec60909,
382
- update_data: Dataset | None = None,
383
- threading: int = -1,
384
- output_component_types: ComponentAttributeMapping = None,
385
- continue_on_batch_error: bool = False,
386
- decode_error: bool = True,
387
- short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum,
388
- experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
389
- ) -> dict[ComponentType, np.ndarray]:
390
- calculation_type = CalculationType.short_circuit
391
- symmetric = False
392
-
393
- options = self._options(
394
- calculation_type=calculation_type,
395
- symmetric=symmetric,
396
- calculation_method=calculation_method,
397
- threading=threading,
398
- short_circuit_voltage_scaling=short_circuit_voltage_scaling,
399
- experimental_features=experimental_features,
400
- )
401
- return self._calculate_impl(
402
- calculation_type=calculation_type,
403
- symmetric=symmetric,
404
- update_data=update_data,
405
- output_component_types=output_component_types,
406
- options=options,
407
- continue_on_batch_error=continue_on_batch_error,
408
- decode_error=decode_error,
409
- experimental_features=experimental_features,
410
- )
411
-
412
- def calculate_power_flow(
413
- self,
414
- *,
415
- symmetric: bool = True,
416
- error_tolerance: float = 1e-8,
417
- max_iterations: int = 20,
418
- calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson,
419
- update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
420
- threading: int = -1,
421
- output_component_types: ComponentAttributeMapping = None,
422
- continue_on_batch_error: bool = False,
423
- decode_error: bool = True,
424
- tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled,
425
- ) -> dict[ComponentType, np.ndarray]:
426
- """
427
- Calculate power flow once with the current model attributes.
428
- Or calculate in batch with the given update dataset in batch.
429
-
430
- Args:
431
- symmetric (bool, optional): Whether to perform a three-phase symmetric calculation.
432
-
433
- - True: Three-phase symmetric calculation, even for asymmetric loads/generations (Default).
434
- - False: Three-phase asymmetric calculation.
435
- error_tolerance (float, optional): Error tolerance for voltage in p.u., applicable only when the
436
- calculation method is iterative.
437
- max_iterations (int, optional): Maximum number of iterations, applicable only when the calculation method
438
- is iterative.
439
- calculation_method (an enumeration or string): The calculation method to use.
440
-
441
- - newton_raphson: Use Newton-Raphson iterative method (default).
442
- - linear: Use linear method.
443
- update_data (dict, optional):
444
- None: Calculate power flow once with the current model attributes.
445
- Or a dictionary for batch calculation with batch update.
446
-
447
- - key: Component type name to be updated in batch.
448
- - value:
449
-
450
- - For homogeneous update batch (a 2D numpy structured array):
451
-
452
- - Dimension 0: Each batch.
453
- - Dimension 1: Each updated element per batch for this component type.
454
- - For inhomogeneous update batch (a dictionary containing two keys):
455
-
456
- - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
457
- update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
458
- compressed sparse structure.
459
- https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
460
- - data: 1D numpy structured array in flat.
461
- threading (int, optional): Applicable only for batch calculation.
462
-
463
- - < 0: Sequential
464
- - = 0: Parallel, use number of hardware threads
465
- - > 0: Specify number of parallel threads
466
- output_component_types (ComponentAttributeMapping):
467
-
468
- - None: Row based data for all component types.
469
- - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
470
- - ComponentAttributeFilterOptions: Columnar data for all component types.
471
- - dict[ComponentType, set[str] | list[str] | None | ComponentAttributeFilterOptions]:
472
- key: ComponentType
473
- value:
474
- - None: Row based data for the specified component types.
475
- - ComponentAttributeFilterOptions: Columnar data for the specified component types.
476
- - set[str] | list[str]: Columnar data for the specified component types and attributes.
477
- continue_on_batch_error (bool, optional): Continue the program (instead of throwing error) if some
478
- scenarios fail.
479
- decode_error (bool, optional):
480
- Decode error messages to their derived types if possible.
481
-
482
- Returns:
483
- Dictionary of results of all components.
484
-
485
- - key: Component type name to be updated in batch.
486
- - value:
487
-
488
- - For single calculation: 1D numpy structured array for the results of this component type.
489
- - For batch calculation: 2D numpy structured array for the results of this component type.
490
-
491
- - Dimension 0: Each batch.
492
- - Dimension 1: The result of each element for this component type.
493
-
494
- Raises:
495
- Exception: In case an error in the core occurs, an exception will be thrown.
496
- """
497
- return self._calculate_power_flow(
498
- symmetric=symmetric,
499
- error_tolerance=error_tolerance,
500
- max_iterations=max_iterations,
501
- calculation_method=calculation_method,
502
- update_data=(_map_to_component_types(update_data) if update_data is not None else None),
503
- threading=threading,
504
- output_component_types=output_component_types,
505
- continue_on_batch_error=continue_on_batch_error,
506
- decode_error=decode_error,
507
- tap_changing_strategy=tap_changing_strategy,
508
- )
509
-
510
- def calculate_state_estimation(
511
- self,
512
- *,
513
- symmetric: bool = True,
514
- error_tolerance: float = 1e-8,
515
- max_iterations: int = 20,
516
- calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear,
517
- update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
518
- threading: int = -1,
519
- output_component_types: ComponentAttributeMapping = None,
520
- continue_on_batch_error: bool = False,
521
- decode_error: bool = True,
522
- ) -> dict[ComponentType, np.ndarray]:
523
- """
524
- Calculate state estimation once with the current model attributes.
525
- Or calculate in batch with the given update dataset in batch.
526
-
527
- Args:
528
- symmetric (bool, optional): Whether to perform a three-phase symmetric calculation.
529
-
530
- - True: Three-phase symmetric calculation, even for asymmetric loads/generations (Default).
531
- - False: Three-phase asymmetric calculation.
532
- error_tolerance (float, optional): error tolerance for voltage in p.u., only applicable when the
533
- calculation method is iterative.
534
- max_iterations (int, optional): Maximum number of iterations, applicable only when the calculation method
535
- is iterative.
536
- calculation_method (an enumeration): Use iterative linear method.
537
- update_data (dict, optional):
538
- None: Calculate state estimation once with the current model attributes.
539
- Or a dictionary for batch calculation with batch update.
540
-
541
- - key: Component type name to be updated in batch.
542
- - value:
543
-
544
- - For homogeneous update batch (a 2D numpy structured array):
545
-
546
- - Dimension 0: Each batch.
547
- - Dimension 1: Each updated element per batch for this component type.
548
- - For inhomogeneous update batch (a dictionary containing two keys):
549
-
550
- - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
551
- update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
552
- compressed sparse structure.
553
- https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
554
- - data: 1D numpy structured array in flat.
555
- threading (int, optional): Applicable only for batch calculation.
556
-
557
- - < 0: Sequential
558
- - = 0: Parallel, use number of hardware threads
559
- - > 0: Specify number of parallel threads
560
- output_component_types (ComponentAttributeMapping):
561
-
562
- - None: Row based data for all component types.
563
- - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
564
- - ComponentAttributeFilterOptions: Columnar data for all component types.
565
- - dict[ComponentType, set[str] | list[str] | None | ComponentAttributeFilterOptions]:
566
- key: ComponentType
567
- value:
568
- - None: Row based data for the specified component types.
569
- - ComponentAttributeFilterOptions: Columnar data for the specified component types.
570
- - set[str] | list[str]: Columnar data for the specified component types and attributes.
571
- continue_on_batch_error (bool, optional): Continue the program (instead of throwing error) if some
572
- scenarios fail.
573
- decode_error (bool, optional):
574
- Decode error messages to their derived types if possible.
575
-
576
- Returns:
577
- Dictionary of results of all components.
578
-
579
- - key: Component type name to be updated in batch.
580
- - value:
581
-
582
- - For single calculation: 1D numpy structured array for the results of this component type.
583
- - For batch calculation: 2D numpy structured array for the results of this component type.
584
-
585
- - Dimension 0: Each batch.
586
- - Dimension 1: The result of each element for this component type.
587
-
588
- Raises:
589
- Exception: In case an error in the core occurs, an exception will be thrown.
590
- """
591
- return self._calculate_state_estimation(
592
- symmetric=symmetric,
593
- error_tolerance=error_tolerance,
594
- max_iterations=max_iterations,
595
- calculation_method=calculation_method,
596
- update_data=(_map_to_component_types(update_data) if update_data is not None else None),
597
- threading=threading,
598
- output_component_types=output_component_types,
599
- continue_on_batch_error=continue_on_batch_error,
600
- decode_error=decode_error,
601
- )
602
-
603
- def calculate_short_circuit(
604
- self,
605
- *,
606
- calculation_method: CalculationMethod | str = CalculationMethod.iec60909,
607
- update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
608
- threading: int = -1,
609
- output_component_types: ComponentAttributeMapping = None,
610
- continue_on_batch_error: bool = False,
611
- decode_error: bool = True,
612
- short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum,
613
- ) -> dict[ComponentType, np.ndarray]:
614
- """
615
- Calculate a short circuit once with the current model attributes.
616
- Or calculate in batch with the given update dataset in batch
617
-
618
- Args:
619
- calculation_method (an enumeration): Use the iec60909 standard.
620
- update_data:
621
- None: calculate a short circuit once with the current model attributes.
622
- Or a dictionary for batch calculation with batch update
623
-
624
- - key: Component type name to be updated in batch
625
- - value:
626
-
627
- - For homogeneous update batch (a 2D numpy structured array):
628
-
629
- - Dimension 0: each batch
630
- - Dimension 1: each updated element per batch for this component type
631
- - For inhomogeneous update batch (a dictionary containing two keys):
632
-
633
- - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
634
- update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
635
- compressed sparse structure.
636
- https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
637
- - data: 1D numpy structured array in flat.
638
- threading (int, optional): Applicable only for batch calculation.
639
-
640
- - < 0: Sequential
641
- - = 0: Parallel, use number of hardware threads
642
- - > 0: Specify number of parallel threads
643
- output_component_types (ComponentAttributeMapping):
644
-
645
- - None: Row based data for all component types.
646
- - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
647
- - ComponentAttributeFilterOptions: Columnar data for all component types.
648
- - dict[ComponentType, set[str] | list[str] | None | ComponentAttributeFilterOptions]:
649
- key: ComponentType
650
- value:
651
- - None: Row based data for the specified component types.
652
- - ComponentAttributeFilterOptions: Columnar data for the specified component types.
653
- - set[str] | list[str]: Columnar data for the specified component types and attributes.
654
- continue_on_batch_error (bool, optional):
655
- Continue the program (instead of throwing error) if some scenarios fail.
656
- decode_error (bool, optional):
657
- Decode error messages to their derived types if possible.
658
- short_circuit_voltage_scaling ({ShortCircuitVoltageScaling, str}, optional):
659
- Whether to use the maximum or minimum voltage scaling.
660
- By default, the maximum voltage scaling is used to calculate the short circuit.
661
-
662
- Returns:
663
- Dictionary of results of all components.
664
-
665
- - key: Component type name to be updated in batch.
666
- - value:
667
-
668
- - For single calculation: 1D numpy structured array for the results of this component type.
669
- - For batch calculation: 2D numpy structured array for the results of this component type.
670
-
671
- - Dimension 0: Each batch.
672
- - Dimension 1: The result of each element for this component type.
673
- Raises:
674
- Exception: In case an error in the core occurs, an exception will be thrown.
675
- """
676
- return self._calculate_short_circuit(
677
- calculation_method=calculation_method,
678
- update_data=(_map_to_component_types(update_data) if update_data is not None else None),
679
- threading=threading,
680
- output_component_types=output_component_types,
681
- continue_on_batch_error=continue_on_batch_error,
682
- decode_error=decode_error,
683
- short_circuit_voltage_scaling=short_circuit_voltage_scaling,
684
- )
685
-
686
- def __del__(self):
687
- pgc.destroy_model(self._model_ptr)
1
+ # SPDX-FileCopyrightText: Contributors to the Power Grid Model project <powergridmodel@lfenergy.org>
2
+ #
3
+ # SPDX-License-Identifier: MPL-2.0
4
+
5
+ """
6
+ Main power grid model class
7
+ """
8
+
9
+ import itertools
10
+ from enum import IntEnum
11
+ from math import prod
12
+ from typing import Any, overload
13
+
14
+ import numpy as np
15
+
16
+ from power_grid_model._core.data_handling import (
17
+ create_output_data,
18
+ get_output_type,
19
+ prepare_input_view,
20
+ prepare_output_view,
21
+ prepare_update_view,
22
+ )
23
+ from power_grid_model._core.data_types import (
24
+ BatchDataset,
25
+ Dataset,
26
+ DenseBatchColumnarOutputDataset,
27
+ DenseBatchOutputDataset,
28
+ DenseBatchRowBasedOutputDataset,
29
+ SingleColumnarOutputDataset,
30
+ SingleDataset,
31
+ SingleOutputDataset,
32
+ SingleRowBasedDataset,
33
+ SingleRowBasedOutputDataset,
34
+ )
35
+ from power_grid_model._core.dataset_definitions import (
36
+ ComponentType,
37
+ ComponentTypeLike,
38
+ ComponentTypeVar,
39
+ _map_to_component_types,
40
+ _str_to_component_type,
41
+ )
42
+ from power_grid_model._core.enum import (
43
+ CalculationMethod,
44
+ CalculationType,
45
+ ComponentAttributeFilterOptions,
46
+ ShortCircuitVoltageScaling,
47
+ TapChangingStrategy,
48
+ _ExperimentalFeatures,
49
+ )
50
+ from power_grid_model._core.error_handling import PowerGridBatchError, assert_no_error, handle_errors
51
+ from power_grid_model._core.index_integer import IdNp, IdxNp
52
+ from power_grid_model._core.options import Options
53
+ from power_grid_model._core.power_grid_core import (
54
+ ConstDatasetPtr,
55
+ IDPtr,
56
+ IdxPtr,
57
+ ModelPtr,
58
+ get_power_grid_core as get_pgc,
59
+ )
60
+ from power_grid_model._core.typing import ComponentAttributeMapping, ComponentAttributeMappingDict
61
+
62
+
63
+ class PowerGridModel:
64
+ """
65
+ Main class for Power Grid Model
66
+ """
67
+
68
+ _model_ptr: ModelPtr
69
+ _all_component_count: dict[ComponentType, int] | None
70
+ _batch_error: PowerGridBatchError | None
71
+
72
+ @property
73
+ def batch_error(self) -> PowerGridBatchError | None:
74
+ """
75
+ Get the batch error object, if present, after a batch calculation with errors.
76
+
77
+ Also works when continue_on_batch_error was set to True during the calculation.
78
+
79
+ Returns:
80
+ Batch error object, or None
81
+ """
82
+ return self._batch_error
83
+
84
+ @property
85
+ def _model(self):
86
+ if not self._model_ptr:
87
+ raise TypeError("You have an empty instance of PowerGridModel!")
88
+ return self._model_ptr
89
+
90
+ @property
91
+ def all_component_count(self) -> dict[ComponentType, int]:
92
+ """
93
+ Get amount of elements per component type.
94
+ If the count for a component type is zero, it will not be in the returned dictionary.
95
+
96
+ Returns:
97
+ A dictionary with
98
+
99
+ - key: Component type name
100
+ - value: Integer count of elements of this type
101
+ """
102
+ if self._all_component_count is None:
103
+ raise TypeError("You have an empty instance of PowerGridModel!")
104
+ return self._all_component_count
105
+
106
+ def copy(self) -> "PowerGridModel":
107
+ """
108
+ Copy the current model
109
+
110
+ Returns:
111
+ A copy of PowerGridModel
112
+ """
113
+ new_model = PowerGridModel.__new__(PowerGridModel)
114
+ new_model._model_ptr = get_pgc().copy_model(self._model)
115
+ assert_no_error()
116
+ new_model._all_component_count = self._all_component_count
117
+ return new_model
118
+
119
+ def __copy__(self) -> "PowerGridModel":
120
+ return self.copy()
121
+
122
+ def __deepcopy__(self, memo: dict[int, Any]) -> "PowerGridModel":
123
+ # PowerGridModel.copy makes already a deepcopy
124
+ new_model = self.copy()
125
+
126
+ # memorize that this object (self) has been deepcopied
127
+ memo[id(self)] = new_model
128
+
129
+ return new_model
130
+
131
+ def __repr__(self) -> str:
132
+ """Return a string representation of the current model.
133
+
134
+ This includes the total number of components and the number of components per component type of the model.
135
+
136
+ Returns:
137
+ String representation of the model
138
+ """
139
+ try:
140
+ component_count = self.all_component_count
141
+ except TypeError:
142
+ component_count = {}
143
+
144
+ message = f"{self.__class__.__name__} ({sum(component_count.values())} components)\n"
145
+
146
+ for component_type, number in component_count.items():
147
+ message += f" - {component_type.value}: {number}\n"
148
+
149
+ return message
150
+
151
+ def __new__(cls, *_args, **_kwargs):
152
+ instance = super().__new__(cls)
153
+ instance._model_ptr = ModelPtr()
154
+ instance._all_component_count = None
155
+ return instance
156
+
157
+ def __init__(self, input_data: SingleDataset, system_frequency: float = 50.0):
158
+ """
159
+ Initialize the model from an input data set.
160
+
161
+ Args:
162
+ input_data: Input data dictionary
163
+
164
+ - key: Component type
165
+ - value: Component data with the correct type :class:`SingleComponentData`
166
+
167
+ system_frequency: Frequency of the power system, default 50 Hz
168
+ """
169
+ # destroy old instance
170
+ get_pgc().destroy_model(self._model_ptr)
171
+ self._all_component_count = None
172
+ # create new
173
+ prepared_input = prepare_input_view(_map_to_component_types(input_data))
174
+ self._model_ptr = get_pgc().create_model(system_frequency, input_data=prepared_input.get_dataset_ptr())
175
+ assert_no_error()
176
+ self._all_component_count = {k: v for k, v in prepared_input.get_info().total_elements().items() if v > 0}
177
+
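The constructor above takes a dictionary keyed by component type with structured arrays as values. A minimal construction sketch using the public initialize_array helper, not taken from this diff; the attribute names follow the documented input types and the values are illustrative only:

    from power_grid_model import ComponentType, LoadGenType, PowerGridModel, initialize_array

    # Two nodes joined by one line, fed by a source, loaded by a symmetric load.
    node = initialize_array("input", ComponentType.node, 2)
    node["id"] = [1, 2]
    node["u_rated"] = [10.5e3, 10.5e3]

    line = initialize_array("input", ComponentType.line, 1)
    line["id"] = [3]
    line["from_node"] = [1]
    line["to_node"] = [2]
    line["from_status"] = [1]
    line["to_status"] = [1]
    line["r1"] = [0.25]
    line["x1"] = [0.2]
    line["c1"] = [1e-8]
    line["tan1"] = [0.0]

    source = initialize_array("input", ComponentType.source, 1)
    source["id"] = [4]
    source["node"] = [1]
    source["status"] = [1]
    source["u_ref"] = [1.0]

    sym_load = initialize_array("input", ComponentType.sym_load, 1)
    sym_load["id"] = [5]
    sym_load["node"] = [2]
    sym_load["status"] = [1]
    sym_load["type"] = [LoadGenType.const_power]
    sym_load["p_specified"] = [1e6]
    sym_load["q_specified"] = [0.2e6]

    input_data = {
        ComponentType.node: node,
        ComponentType.line: line,
        ComponentType.source: source,
        ComponentType.sym_load: sym_load,
    }
    model = PowerGridModel(input_data, system_frequency=50.0)
    # all_component_count now reports the non-zero component counts, e.g. node: 2.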
178
+ def update(self, *, update_data: Dataset):
179
+ """
180
+ Update the model with changes.
181
+
182
+ The model will be in an invalid state if the update fails and should be discarded.
183
+
184
+ Args:
185
+ update_data: Update data dictionary
186
+
187
+ - key: Component type
188
+ - value: Component data with the correct type :class:`ComponentData` (single scenario or batch)
189
+
190
+ Raises:
191
+ PowerGridError if the update fails. The model is left in an invalid state and should be discarded.
192
+
193
+ Returns:
194
+ None
195
+ """
196
+ prepared_update = prepare_update_view(_map_to_component_types(update_data))
197
+ get_pgc().update_model(self._model, prepared_update.get_dataset_ptr())
198
+ assert_no_error()
199
+
200
+ def get_indexer(self, component_type: ComponentTypeLike, ids: np.ndarray):
201
+ """
202
+ Get array of indexers given array of ids for component type.
203
+
204
+ This enables syntax like input_data[ComponentType.node][get_indexer(ids)]
205
+
206
+ Args:
207
+ component_type: Type of component
208
+ ids: Array of ids
209
+
210
+ Returns:
211
+ Array of indexers, same shape as input array ids
212
+ """
213
+ component_type = _str_to_component_type(component_type)
214
+ ids_c = np.ascontiguousarray(ids, dtype=IdNp).ctypes.data_as(IDPtr)
215
+ indexer = np.empty_like(ids, dtype=IdxNp, order="C")
216
+ indexer_c = indexer.ctypes.data_as(IdxPtr)
217
+ size = ids.size
218
+ # call c function
219
+ get_pgc().get_indexer(self._model, component_type, size, ids_c, indexer_c)
220
+ assert_no_error()
221
+ return indexer
222
+
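A short sketch of the indexer pattern the docstring above refers to; model and input_data are the illustrative objects from the construction sketch earlier:

    import numpy as np
    from power_grid_model import ComponentType

    ids = np.array([2, 1])
    idx = model.get_indexer(ComponentType.node, ids)          # positions of ids 2 and 1
    nodes_in_id_order = input_data[ComponentType.node][idx]   # node rows reordered to match ids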
223
+ def _get_output_component_count(self, calculation_type: CalculationType):
224
+ exclude_types = {
225
+ CalculationType.power_flow: [
226
+ ComponentType.sym_voltage_sensor,
227
+ ComponentType.asym_voltage_sensor,
228
+ ComponentType.sym_power_sensor,
229
+ ComponentType.asym_power_sensor,
230
+ ComponentType.fault,
231
+ ],
232
+ CalculationType.state_estimation: [ComponentType.fault],
233
+ CalculationType.short_circuit: [
234
+ ComponentType.sym_voltage_sensor,
235
+ ComponentType.asym_voltage_sensor,
236
+ ComponentType.sym_power_sensor,
237
+ ComponentType.asym_power_sensor,
238
+ ],
239
+ }.get(calculation_type, [])
240
+
241
+ def include_type(component_type: ComponentType):
242
+ return all(exclude_type.value not in component_type.value for exclude_type in exclude_types)
243
+
244
+ return {ComponentType[k]: v for k, v in self.all_component_count.items() if include_type(k)}
245
+
246
+ def _construct_output(
247
+ self,
248
+ output_component_types: ComponentAttributeMapping,
249
+ calculation_type: CalculationType,
250
+ symmetric: bool,
251
+ is_batch: bool,
252
+ batch_size: int,
253
+ ):
254
+ all_component_count = self._get_output_component_count(calculation_type=calculation_type)
255
+ return create_output_data(
256
+ output_component_types=output_component_types,
257
+ output_type=get_output_type(calculation_type=calculation_type, symmetric=symmetric),
258
+ all_component_count=all_component_count,
259
+ is_batch=is_batch,
260
+ batch_size=batch_size,
261
+ )
262
+
263
+ @staticmethod
264
+ def _options(**kwargs) -> Options:
265
+ def as_enum_value(key_enum: str, type_: type[IntEnum]):
266
+ if key_enum in kwargs:
267
+ value_enum = kwargs[key_enum]
268
+ if isinstance(value_enum, str):
269
+ kwargs[key_enum] = type_[value_enum] # NOSONAR(S5864) IntEnum has __getitem__
270
+
271
+ as_enum_value("calculation_method", CalculationMethod)
272
+ as_enum_value("tap_changing_strategy", TapChangingStrategy)
273
+ as_enum_value("short_circuit_voltage_scaling", ShortCircuitVoltageScaling)
274
+ as_enum_value("experimental_features", _ExperimentalFeatures)
275
+
276
+ opt = Options()
277
+ for key, value in kwargs.items():
278
+ setattr(opt, key, value.value if isinstance(value, IntEnum) else value)
279
+ return opt
280
+
281
+ def _handle_errors(self, continue_on_batch_error: bool, batch_size: int, decode_error: bool):
282
+ self._batch_error = handle_errors(
283
+ continue_on_batch_error=continue_on_batch_error,
284
+ batch_size=batch_size,
285
+ decode_error=decode_error,
286
+ )
287
+
288
+ def _calculate_impl( # noqa: PLR0913
289
+ self,
290
+ calculation_type: CalculationType,
291
+ symmetric: bool,
292
+ update_data: Dataset | list[Dataset] | None,
293
+ output_component_types: ComponentAttributeMapping,
294
+ options: Options,
295
+ continue_on_batch_error: bool,
296
+ decode_error: bool,
297
+ experimental_features: _ExperimentalFeatures | str, # NOSONAR # noqa: ARG002
298
+ ) -> Dataset:
299
+ """
300
+ Core calculation routine
301
+
302
+ Args:
303
+ calculation_type:
304
+ symmetric:
305
+ update_data:
306
+ output_component_types:
307
+ options:
308
+ continue_on_batch_error:
309
+ decode_error:
310
+
311
+ Returns:
312
+ """
313
+ self._batch_error = None
314
+ if update_data is None:
315
+ is_batch = False
316
+ update_data = []
317
+ else:
318
+ is_batch = True
319
+ if not isinstance(update_data, list):
320
+ update_data = [update_data]
321
+ update_data = [_map_to_component_types(x) for x in update_data]
322
+ prepared_update = [prepare_update_view(x) for x in update_data]
323
+ for this_dataset, next_dataset in itertools.pairwise(prepared_update):
324
+ this_dataset.set_next_cartesian_product_dimension(next_dataset)
325
+ update_ptr: ConstDatasetPtr = prepared_update[0].get_dataset_ptr() if prepared_update else ConstDatasetPtr()
326
+ batch_size = prod(x.get_info().batch_size() for x in prepared_update)
327
+
328
+ output_data = self._construct_output(
329
+ output_component_types=output_component_types,
330
+ calculation_type=calculation_type,
331
+ symmetric=symmetric,
332
+ is_batch=is_batch,
333
+ batch_size=batch_size,
334
+ )
335
+ prepared_result = prepare_output_view(
336
+ output_data=output_data,
337
+ output_type=get_output_type(calculation_type=calculation_type, symmetric=symmetric),
338
+ )
339
+
340
+ # run calculation
341
+ get_pgc().calculate(
342
+ # model and options
343
+ self._model,
344
+ options.opt,
345
+ output_data=prepared_result.get_dataset_ptr(),
346
+ update_data=update_ptr,
347
+ )
348
+
349
+ self._handle_errors(
350
+ continue_on_batch_error=continue_on_batch_error,
351
+ batch_size=batch_size,
352
+ decode_error=decode_error,
353
+ )
354
+
355
+ return output_data
356
+
357
+ def _calculate_power_flow( # noqa: PLR0913
358
+ self,
359
+ *,
360
+ symmetric: bool = True,
361
+ error_tolerance: float = 1e-8,
362
+ max_iterations: int = 20,
363
+ calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson,
364
+ update_data: Dataset | list[Dataset] | None = None,
365
+ threading: int = -1,
366
+ output_component_types: ComponentAttributeMapping = None,
367
+ continue_on_batch_error: bool = False,
368
+ decode_error: bool = True,
369
+ tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled,
370
+ experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
371
+ ) -> Dataset:
372
+ calculation_type = CalculationType.power_flow
373
+ options = self._options(
374
+ calculation_type=calculation_type,
375
+ symmetric=symmetric,
376
+ error_tolerance=error_tolerance,
377
+ max_iterations=max_iterations,
378
+ calculation_method=calculation_method,
379
+ tap_changing_strategy=tap_changing_strategy,
380
+ threading=threading,
381
+ experimental_features=experimental_features,
382
+ )
383
+ return self._calculate_impl(
384
+ calculation_type=calculation_type,
385
+ symmetric=symmetric,
386
+ update_data=update_data,
387
+ output_component_types=output_component_types,
388
+ options=options,
389
+ continue_on_batch_error=continue_on_batch_error,
390
+ decode_error=decode_error,
391
+ experimental_features=experimental_features,
392
+ )
393
+
394
+ def _calculate_state_estimation( # noqa: PLR0913
395
+ self,
396
+ *,
397
+ symmetric: bool = True,
398
+ error_tolerance: float = 1e-8,
399
+ max_iterations: int = 20,
400
+ calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear,
401
+ update_data: Dataset | list[Dataset] | None = None,
402
+ threading: int = -1,
403
+ output_component_types: ComponentAttributeMapping = None,
404
+ continue_on_batch_error: bool = False,
405
+ decode_error: bool = True,
406
+ experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
407
+ ) -> Dataset:
408
+ calculation_type = CalculationType.state_estimation
409
+ options = self._options(
410
+ calculation_type=calculation_type,
411
+ symmetric=symmetric,
412
+ error_tolerance=error_tolerance,
413
+ max_iterations=max_iterations,
414
+ calculation_method=calculation_method,
415
+ threading=threading,
416
+ experimental_features=experimental_features,
417
+ )
418
+ return self._calculate_impl(
419
+ calculation_type=calculation_type,
420
+ symmetric=symmetric,
421
+ update_data=update_data,
422
+ output_component_types=output_component_types,
423
+ options=options,
424
+ continue_on_batch_error=continue_on_batch_error,
425
+ decode_error=decode_error,
426
+ experimental_features=experimental_features,
427
+ )
428
+
429
+ def _calculate_short_circuit( # noqa: PLR0913
430
+ self,
431
+ *,
432
+ calculation_method: CalculationMethod | str = CalculationMethod.iec60909,
433
+ update_data: Dataset | list[Dataset] | None = None,
434
+ threading: int = -1,
435
+ output_component_types: ComponentAttributeMapping = None,
436
+ continue_on_batch_error: bool = False,
437
+ decode_error: bool = True,
438
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum,
439
+ experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
440
+ ) -> Dataset:
441
+ calculation_type = CalculationType.short_circuit
442
+ symmetric = False
443
+
444
+ options = self._options(
445
+ calculation_type=calculation_type,
446
+ symmetric=symmetric,
447
+ calculation_method=calculation_method,
448
+ threading=threading,
449
+ short_circuit_voltage_scaling=short_circuit_voltage_scaling,
450
+ experimental_features=experimental_features,
451
+ )
452
+ return self._calculate_impl(
453
+ calculation_type=calculation_type,
454
+ symmetric=symmetric,
455
+ update_data=update_data,
456
+ output_component_types=output_component_types,
457
+ options=options,
458
+ continue_on_batch_error=continue_on_batch_error,
459
+ decode_error=decode_error,
460
+ experimental_features=experimental_features,
461
+ )
462
+
463
+ @overload
464
+ def calculate_power_flow(
465
+ self,
466
+ *,
467
+ symmetric: bool = ...,
468
+ error_tolerance: float = ...,
469
+ max_iterations: int = ...,
470
+ calculation_method: CalculationMethod | str = ...,
471
+ threading: int = ...,
472
+ output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ...,
473
+ continue_on_batch_error: bool = ...,
474
+ decode_error: bool = ...,
475
+ tap_changing_strategy: TapChangingStrategy | str = ...,
476
+ ) -> SingleRowBasedDataset: ...
477
+ @overload
478
+ def calculate_power_flow(
479
+ self,
480
+ *,
481
+ symmetric: bool = ...,
482
+ error_tolerance: float = ...,
483
+ max_iterations: int = ...,
484
+ calculation_method: CalculationMethod | str = ...,
485
+ update_data: None = ...,
486
+ threading: int = ...,
487
+ output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ...,
488
+ continue_on_batch_error: bool = ...,
489
+ decode_error: bool = ...,
490
+ tap_changing_strategy: TapChangingStrategy | str = ...,
491
+ ) -> SingleRowBasedDataset: ...
492
+ @overload
493
+ def calculate_power_flow(
494
+ self,
495
+ *,
496
+ symmetric: bool = ...,
497
+ error_tolerance: float = ...,
498
+ max_iterations: int = ...,
499
+ calculation_method: CalculationMethod | str = ...,
500
+ update_data: None = ...,
501
+ threading: int = ...,
502
+ output_component_types: ComponentAttributeFilterOptions = ...,
503
+ continue_on_batch_error: bool = ...,
504
+ decode_error: bool = ...,
505
+ tap_changing_strategy: TapChangingStrategy | str = ...,
506
+ ) -> SingleColumnarOutputDataset: ...
507
+ @overload
508
+ def calculate_power_flow(
509
+ self,
510
+ *,
511
+ symmetric: bool = ...,
512
+ error_tolerance: float = ...,
513
+ max_iterations: int = ...,
514
+ calculation_method: CalculationMethod | str = ...,
515
+ update_data: None = ...,
516
+ threading: int = ...,
517
+ output_component_types: ComponentAttributeMappingDict = ...,
518
+ continue_on_batch_error: bool = ...,
519
+ decode_error: bool = ...,
520
+ tap_changing_strategy: TapChangingStrategy | str = ...,
521
+ ) -> SingleOutputDataset: ...
522
+ @overload
523
+ def calculate_power_flow(
524
+ self,
525
+ *,
526
+ symmetric: bool = ...,
527
+ error_tolerance: float = ...,
528
+ max_iterations: int = ...,
529
+ calculation_method: CalculationMethod | str = ...,
530
+ update_data: BatchDataset | list[BatchDataset] = ...,
531
+ threading: int = ...,
532
+ output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ...,
533
+ continue_on_batch_error: bool = ...,
534
+ decode_error: bool = ...,
535
+ tap_changing_strategy: TapChangingStrategy | str = ...,
536
+ ) -> DenseBatchRowBasedOutputDataset: ...
537
+ @overload
538
+ def calculate_power_flow(
539
+ self,
540
+ *,
541
+ symmetric: bool = ...,
542
+ error_tolerance: float = ...,
543
+ max_iterations: int = ...,
544
+ calculation_method: CalculationMethod | str = ...,
545
+ update_data: BatchDataset | list[BatchDataset] = ...,
546
+ threading: int = ...,
547
+ output_component_types: ComponentAttributeFilterOptions = ...,
548
+ continue_on_batch_error: bool = ...,
549
+ decode_error: bool = ...,
550
+ tap_changing_strategy: TapChangingStrategy | str = ...,
551
+ ) -> DenseBatchColumnarOutputDataset: ...
552
+ @overload
553
+ def calculate_power_flow(
554
+ self,
555
+ *,
556
+ symmetric: bool = ...,
557
+ error_tolerance: float = ...,
558
+ max_iterations: int = ...,
559
+ calculation_method: CalculationMethod | str = ...,
560
+ update_data: BatchDataset | list[BatchDataset] = ...,
561
+ threading: int = ...,
562
+ output_component_types: ComponentAttributeMappingDict = ...,
563
+ continue_on_batch_error: bool = ...,
564
+ decode_error: bool = ...,
565
+ tap_changing_strategy: TapChangingStrategy | str = ...,
566
+ ) -> DenseBatchOutputDataset: ...
567
+ def calculate_power_flow( # noqa: PLR0913
568
+ self,
569
+ *,
570
+ symmetric: bool = True,
571
+ error_tolerance: float = 1e-8,
572
+ max_iterations: int = 20,
573
+ calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson,
574
+ update_data: BatchDataset | list[BatchDataset] | None = None,
575
+ threading: int = -1,
576
+ output_component_types: ComponentAttributeMapping = None,
577
+ continue_on_batch_error: bool = False,
578
+ decode_error: bool = True,
579
+ tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled,
580
+ ) -> Dataset:
581
+ """
582
+ Calculate power flow once with the current model attributes.
583
+ Or calculate in batch with the given update dataset in batch.
584
+
585
+ Args:
586
+ symmetric (bool, optional): Whether to perform a three-phase symmetric calculation.
587
+
588
+ - True: Three-phase symmetric calculation, even for asymmetric loads/generations (Default).
+ - False: Three-phase asymmetric calculation.
+ error_tolerance (float, optional): Error tolerance for voltage in p.u., applicable only when the
+ calculation method is iterative.
+ max_iterations (int, optional): Maximum number of iterations, applicable only when the calculation method
+ is iterative.
+ calculation_method (an enumeration or string): The calculation method to use.
+
+ - newton_raphson: Use Newton-Raphson iterative method (default).
+ - linear: Use linear method.
+ update_data (dict, list of dict, optional):
+ None: Calculate power flow once with the current model attributes.
+
+ Or a dictionary for batch calculation with batch update.
+
+ - key: Component type name to be updated in batch.
+ - value:
+
+ - For homogeneous update batch (a 2D numpy structured array):
+
+ - Dimension 0: Each batch.
+ - Dimension 1: Each updated element per batch for this component type.
+ - For inhomogeneous update batch (a dictionary containing two keys):
+
+ - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
+ update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
+ compressed sparse structure.
+ https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
+ - data: A flat 1D numpy structured array containing the update data of all batches, concatenated.
+ Or a list of such dictionaries (batch datasets) to represent multiple dimensions of cartesian product.
+ The calculation core will interpret these datasets as a cartesian product of all the scenarios.
+ Each batch dataset in the list represents one dimension of the cartesian product.
+ The output will then have row size equal to the product of the batch sizes of all these datasets,
+ in 1D flat structure.
+ E.g., if you have three batch datasets with batch sizes 2, 3, and 4 respectively,
+ and the number of nodes is 5, the final output for nodes will have shape (2*3*4, 5).
+ threading (int, optional): Applicable only for batch calculation.
+
+ - < 0: Sequential
+ - = 0: Parallel, use number of hardware threads
+ - > 0: Specify number of parallel threads
+ output_component_types (ComponentAttributeMapping):
+
+ - None: Row based data for all component types.
+ - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
+ - ComponentAttributeFilterOptions: Columnar data for all component types.
+ - ComponentAttributeMappingDict:
+ key: ComponentType
+ value:
+ - None: Row based data for the specified component types.
+ - ComponentAttributeFilterOptions: Columnar data for the specified component types.
+ - set[str] | list[str]: Columnar data for the specified component types and attributes.
+ continue_on_batch_error (bool, optional):
+ Continue the program (instead of raising an error) if some scenarios fail.
+ You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
+ decode_error (bool, optional):
+ Decode error messages to their derived types if possible.
+ tap_changing_strategy (an enumeration or string, optional): The strategy to use for automatic tap
+ changing of transformers with tap regulators.
+
+ Returns:
+ Dictionary of results of all components.
+
+ - key: Component type name.
+ - value:
+
+ - For single calculation: 1D numpy structured array for the results of this component type.
+ - For batch calculation: 2D numpy structured array for the results of this component type.
+
+ - Dimension 0: Each batch.
+ - Dimension 1: The result of each element for this component type.
+
+ Raises:
+ Exception: If an error occurs in the core, an exception is raised.
+ """
+ return self._calculate_power_flow(
+ symmetric=symmetric,
+ error_tolerance=error_tolerance,
+ max_iterations=max_iterations,
+ calculation_method=calculation_method,
+ update_data=update_data,
+ threading=threading,
+ output_component_types=output_component_types,
+ continue_on_batch_error=continue_on_batch_error,
+ decode_error=decode_error,
+ tap_changing_strategy=tap_changing_strategy,
+ )
+
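# --- Illustrative sketch (not part of the diffed package source) ---
# Shows the dense and sparse (indptr/data) batch update layouts described in the
# calculate_power_flow docstring above. Component and attribute names ("node", "source",
# "sym_load", "u_rated", "p_specified", ...) follow the standard power-grid-model input
# format; the tiny one-node network is only for demonstration.
import numpy as np
from power_grid_model import LoadGenType, PowerGridModel, initialize_array

# minimal network: one node with a source and a load attached
node = initialize_array("input", "node", 1)
node["id"] = [1]
node["u_rated"] = [10.5e3]

source = initialize_array("input", "source", 1)
source["id"] = [2]
source["node"] = [1]
source["status"] = [1]
source["u_ref"] = [1.0]

sym_load = initialize_array("input", "sym_load", 1)
sym_load["id"] = [3]
sym_load["node"] = [1]
sym_load["status"] = [1]
sym_load["type"] = [LoadGenType.const_power]
sym_load["p_specified"] = [1e6]
sym_load["q_specified"] = [0.2e6]

model = PowerGridModel({"node": node, "source": source, "sym_load": sym_load})

# sparse (inhomogeneous) batch update: scenario 0 changes the load, scenario 1 changes nothing;
# the dense alternative would be a 2D update array, e.g. initialize_array("update", "sym_load", (2, 1))
load_update = initialize_array("update", "sym_load", 1)  # flat data array
load_update["id"] = [3]
load_update["p_specified"] = [2e6]
update_data = {
    "sym_load": {
        "indptr": np.array([0, 1, 1], dtype=np.int64),  # length n_batch + 1 -> 2 scenarios
        "data": load_update,
    }
}
result = model.calculate_power_flow(update_data=update_data)
print(result["node"]["u_pu"].shape)  # (2, 1): (n_scenarios, n_nodes)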
+ @overload
+ def calculate_state_estimation(
+ self,
+ *,
+ symmetric: bool = ...,
+ error_tolerance: float = ...,
+ max_iterations: int = ...,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: None = ...,
+ threading: int = ...,
+ output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ ) -> SingleRowBasedOutputDataset: ...
+ @overload
+ def calculate_state_estimation(
+ self,
+ *,
+ symmetric: bool = ...,
+ error_tolerance: float = ...,
+ max_iterations: int = ...,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: None = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeFilterOptions = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ ) -> SingleColumnarOutputDataset: ...
+ @overload
+ def calculate_state_estimation(
+ self,
+ *,
+ symmetric: bool = ...,
+ error_tolerance: float = ...,
+ max_iterations: int = ...,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: None = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeMappingDict = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ ) -> SingleOutputDataset: ...
+ @overload
+ def calculate_state_estimation(
+ self,
+ *,
+ symmetric: bool = ...,
+ error_tolerance: float = ...,
+ max_iterations: int = ...,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: BatchDataset | list[BatchDataset] = ...,
+ threading: int = ...,
+ output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ ) -> DenseBatchRowBasedOutputDataset: ...
+ @overload
+ def calculate_state_estimation(
+ self,
+ *,
+ symmetric: bool = ...,
+ error_tolerance: float = ...,
+ max_iterations: int = ...,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: BatchDataset | list[BatchDataset] = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeFilterOptions = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ ) -> DenseBatchColumnarOutputDataset: ...
+ @overload
+ def calculate_state_estimation(
+ self,
+ *,
+ symmetric: bool = ...,
+ error_tolerance: float = ...,
+ max_iterations: int = ...,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: BatchDataset | list[BatchDataset] = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeMappingDict = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ ) -> DenseBatchOutputDataset: ...
+ def calculate_state_estimation( # noqa: PLR0913
+ self,
+ *,
+ symmetric: bool = True,
+ error_tolerance: float = 1e-8,
+ max_iterations: int = 20,
+ calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear,
+ update_data: BatchDataset | list[BatchDataset] | None = None,
+ threading: int = -1,
+ output_component_types: ComponentAttributeMapping = None,
+ continue_on_batch_error: bool = False,
+ decode_error: bool = True,
+ ) -> Dataset:
+ """
772
+ Calculate state estimation once with the current model attributes.
773
+ Or calculate in batch with the given update dataset in batch.
774
+
775
+ Args:
776
+ symmetric (bool, optional): Whether to perform a three-phase symmetric calculation.
777
+
778
+ - True: Three-phase symmetric calculation, even for asymmetric loads/generations (Default).
779
+ - False: Three-phase asymmetric calculation.
780
+ error_tolerance (float, optional): error tolerance for voltage in p.u., only applicable when the
781
+ calculation method is iterative.
782
+ max_iterations (int, optional): Maximum number of iterations, applicable only when the calculation method
783
+ is iterative.
784
+ calculation_method (an enumeration): Use iterative linear method.
785
+ update_data (dict, optional):
786
+ None: Calculate state estimation once with the current model attributes.
787
+
788
+ Or a dictionary for batch calculation with batch update.
789
+
790
+ - key: Component type name to be updated in batch.
791
+ - value:
792
+
793
+ - For homogeneous update batch (a 2D numpy structured array):
794
+
795
+ - Dimension 0: Each batch.
796
+ - Dimension 1: Each updated element per batch for this component type.
797
+ - For inhomogeneous update batch (a dictionary containing two keys):
798
+
799
+ - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
800
+ update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
801
+ compressed sparse structure.
802
+ https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
803
+ - data: 1D numpy structured array in flat.
804
+ Or a list of such dictionaries (batch datasets) to represent multiple dimensions of cartesian product.
805
+ The calculation core will interpret these datasets as a cartesian product of all the scenarios.
806
+ Each batch dataset in the list represents one dimension of the cartesian product.
807
+ The output will then have row size equal to the product of the batch sizes of all these datasets,
808
+ in 1D flat structure.
809
+ E.g., if you have three batch datasets with batch sizes 2, 3, and 4 respectively,
810
+ and the number of nodes is 5, the final output for nodes will have shape (2*3*4, 5).
811
+ threading (int, optional): Applicable only for batch calculation.
812
+
813
+ - < 0: Sequential
814
+ - = 0: Parallel, use number of hardware threads
815
+ - > 0: Specify number of parallel threads
816
+ output_component_types (ComponentAttributeMapping):
817
+
818
+ - None: Row based data for all component types.
819
+ - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
820
+ - ComponentAttributeFilterOptions: Columnar data for all component types.
821
+ - ComponentAttributeMappingDict:
822
+ key: ComponentType
823
+ value:
824
+ - None: Row based data for the specified component types.
825
+ - ComponentAttributeFilterOptions: Columnar data for the specified component types.
826
+ - set[str] | list[str]: Columnar data for the specified component types and attributes.
827
+ continue_on_batch_error (bool, optional):
828
+ Continue the program (instead of throwing error) if some scenarios fail.
829
+ You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
830
+ decode_error (bool, optional):
831
+ Decode error messages to their derived types if possible.
832
+
833
+ Returns:
834
+ Dictionary of results of all components.
835
+
836
+ - key: Component type name to be updated in batch.
837
+ - value:
838
+
839
+ - For single calculation: 1D numpy structured array for the results of this component type.
840
+ - For batch calculation: 2D numpy structured array for the results of this component type.
841
+
842
+ - Dimension 0: Each batch.
843
+ - Dimension 1: The result of each element for this component type.
844
+
845
+ Raises:
846
+ Exception: In case an error in the core occurs, an exception will be thrown.
847
+ """
848
+ return self._calculate_state_estimation(
+ symmetric=symmetric,
+ error_tolerance=error_tolerance,
+ max_iterations=max_iterations,
+ calculation_method=calculation_method,
+ update_data=update_data,
+ threading=threading,
+ output_component_types=output_component_types,
+ continue_on_batch_error=continue_on_batch_error,
+ decode_error=decode_error,
+ )
+
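# --- Illustrative sketch (not part of the diffed package source) ---
# Shows the output_component_types mapping (columnar output for selected attributes) and
# continue_on_batch_error, as described in the calculate_state_estimation docstring above.
# Assumes `model` and `update_data` exist as in the earlier power-flow sketch and that the
# model contains the sensor components a state estimation needs; the batch_error attribute
# names are assumptions based on the docstring.
result = model.calculate_state_estimation(
    update_data=update_data,
    output_component_types={"node": ["u_pu", "u_angle"]},  # columnar data, only these attributes
    continue_on_batch_error=True,  # failed scenarios are reported instead of aborting the batch
)
print(result["node"]["u_pu"])  # one array per requested attribute

# per the docstring, errors of failed scenarios remain retrievable via the batch_error
if model.batch_error is not None:
    print(model.batch_error.failed_scenarios)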
+ @overload
+ def calculate_short_circuit(
+ self,
+ *,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: None = ...,
+ threading: int = ...,
+ output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str,
+ ) -> SingleRowBasedOutputDataset: ...
+ @overload
+ def calculate_short_circuit(
+ self,
+ *,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: None = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeFilterOptions = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str,
+ ) -> SingleColumnarOutputDataset: ...
+ @overload
+ def calculate_short_circuit(
+ self,
+ *,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: None = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeMappingDict = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str,
+ ) -> SingleOutputDataset: ...
+ @overload
+ def calculate_short_circuit(
+ self,
+ *,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: BatchDataset | list[BatchDataset] = ...,
+ threading: int = ...,
+ output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str,
+ ) -> DenseBatchRowBasedOutputDataset: ...
+ @overload
+ def calculate_short_circuit(
+ self,
+ *,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: BatchDataset | list[BatchDataset] = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeFilterOptions = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str,
+ ) -> DenseBatchColumnarOutputDataset: ...
+ @overload
+ def calculate_short_circuit(
+ self,
+ *,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: BatchDataset | list[BatchDataset] = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeMappingDict = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str,
+ ) -> DenseBatchOutputDataset: ...
+ def calculate_short_circuit( # noqa: PLR0913
+ self,
+ *,
+ calculation_method: CalculationMethod | str = CalculationMethod.iec60909,
+ update_data: BatchDataset | list[BatchDataset] | None = None,
+ threading: int = -1,
+ output_component_types: ComponentAttributeMapping = None,
+ continue_on_batch_error: bool = False,
+ decode_error: bool = True,
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum,
+ ) -> Dataset:
+ """
944
+ Calculate a short circuit once with the current model attributes.
945
+ Or calculate in batch with the given update dataset in batch
946
+
947
+ Args:
948
+ calculation_method (an enumeration): Use the iec60909 standard.
949
+ update_data:
950
+ None: calculate a short circuit once with the current model attributes.
951
+
952
+ Or a dictionary for batch calculation with batch update
953
+
954
+ - key: Component type name to be updated in batch
955
+ - value:
956
+
957
+ - For homogeneous update batch (a 2D numpy structured array):
958
+
959
+ - Dimension 0: each batch
960
+ - Dimension 1: each updated element per batch for this component type
961
+ - For inhomogeneous update batch (a dictionary containing two keys):
962
+
963
+ - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
964
+ update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
965
+ compressed sparse structure.
966
+ https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
967
+ - data: 1D numpy structured array in flat.
968
+ Or a list of such dictionaries (batch datasets) to represent multiple dimensions of cartesian product.
969
+ The calculation core will interpret these datasets as a cartesian product of all the scenarios.
970
+ Each batch dataset in the list represents one dimension of the cartesian product.
971
+ The output will then have row size equal to the product of the batch sizes of all these datasets,
972
+ in 1D flat structure.
973
+ E.g., if you have three batch datasets with batch sizes 2, 3, and 4 respectively,
974
+ and the number of nodes is 5, the final output for nodes will have shape (2*3*4, 5).
975
+ threading (int, optional): Applicable only for batch calculation.
976
+
977
+ - < 0: Sequential
978
+ - = 0: Parallel, use number of hardware threads
979
+ - > 0: Specify number of parallel threads
980
+ output_component_types (ComponentAttributeMapping):
981
+
982
+ - None: Row based data for all component types.
983
+ - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
984
+ - ComponentAttributeFilterOptions: Columnar data for all component types.
985
+ - ComponentAttributeMappingDict:
986
+ key: ComponentType
987
+ value:
988
+ - None: Row based data for the specified component types.
989
+ - ComponentAttributeFilterOptions: Columnar data for the specified component types.
990
+ - set[str] | list[str]: Columnar data for the specified component types and attributes.
991
+ continue_on_batch_error (bool, optional):
992
+ Continue the program (instead of throwing error) if some scenarios fail.
993
+ You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
994
+ decode_error (bool, optional):
995
+ Decode error messages to their derived types if possible.
996
+ short_circuit_voltage_scaling ({ShortCircuitVoltageSaling, str}, optional):
997
+ Whether to use the maximum or minimum voltage scaling.
998
+ By default, the maximum voltage scaling is used to calculate the short circuit.
999
+
1000
+ Returns:
1001
+ Dictionary of results of all components.
1002
+
1003
+ - key: Component type name to be updated in batch.
1004
+ - value:
1005
+
1006
+ - For single calculation: 1D numpy structured array for the results of this component type.
1007
+ - For batch calculation: 2D numpy structured array for the results of this component type.
1008
+
1009
+ - Dimension 0: Each batch.
1010
+ - Dimension 1: The result of each element for this component type.
1011
+ Raises:
1012
+ Exception: In case an error in the core occurs, an exception will be thrown.
1013
+ """
1014
+ return self._calculate_short_circuit(
+ calculation_method=calculation_method,
+ update_data=update_data,
+ threading=threading,
+ output_component_types=output_component_types,
+ continue_on_batch_error=continue_on_batch_error,
+ decode_error=decode_error,
+ short_circuit_voltage_scaling=short_circuit_voltage_scaling,
+ )
+
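# --- Illustrative sketch (not part of the diffed package source) ---
# Shows a single short-circuit calculation with minimum voltage scaling, per the
# calculate_short_circuit docstring above. Assumes `model` contains a fault component
# (required for short-circuit calculations); the "i_f" output attribute name is assumed
# from the standard fault output format.
from power_grid_model import ShortCircuitVoltageScaling

sc_result = model.calculate_short_circuit(
    short_circuit_voltage_scaling=ShortCircuitVoltageScaling.minimum,  # default is maximum
)
print(sc_result["fault"]["i_f"])  # fault current per fault object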
+ def __del__(self):
+ get_pgc().destroy_model(self._model_ptr)
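# --- Illustrative sketch (not part of the diffed package source) ---
# Shows the list-of-batch-datasets form of update_data: the core combines the datasets as a
# cartesian product of scenarios and flattens the result, as described in the docstrings
# above. Reuses `model` and `initialize_array` from the earlier power-flow sketch; component
# ids are illustrative. threading=0 runs the scenarios in parallel on all hardware threads.
load_batch = initialize_array("update", "sym_load", (2, 1))  # 2 scenarios
load_batch["id"] = [[3], [3]]
load_batch["p_specified"] = [[1e6], [2e6]]

source_batch = initialize_array("update", "source", (3, 1))  # 3 scenarios
source_batch["id"] = [[2], [2], [2]]
source_batch["u_ref"] = [[0.98], [1.0], [1.02]]

result = model.calculate_power_flow(
    update_data=[{"sym_load": load_batch}, {"source": source_batch}],
    threading=0,
)
print(result["node"]["u_pu"].shape)  # (2 * 3, n_nodes), flattened cartesian product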