power-grid-model 1.10.74__py3-none-win_amd64.whl → 1.12.119__py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (67)
  1. power_grid_model/__init__.py +54 -29
  2. power_grid_model/_core/__init__.py +3 -3
  3. power_grid_model/_core/buffer_handling.py +507 -478
  4. power_grid_model/_core/data_handling.py +195 -141
  5. power_grid_model/_core/data_types.py +142 -0
  6. power_grid_model/_core/dataset_definitions.py +109 -109
  7. power_grid_model/_core/enum.py +226 -0
  8. power_grid_model/_core/error_handling.py +215 -202
  9. power_grid_model/_core/errors.py +134 -0
  10. power_grid_model/_core/index_integer.py +17 -17
  11. power_grid_model/_core/options.py +71 -69
  12. power_grid_model/_core/power_grid_core.py +577 -597
  13. power_grid_model/_core/power_grid_dataset.py +545 -528
  14. power_grid_model/_core/power_grid_meta.py +262 -244
  15. power_grid_model/_core/power_grid_model.py +1025 -692
  16. power_grid_model/_core/power_grid_model_c/__init__.py +3 -0
  17. power_grid_model/_core/power_grid_model_c/bin/power_grid_model_c.dll +0 -0
  18. power_grid_model/_core/power_grid_model_c/get_pgm_dll_path.py +63 -0
  19. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/basics.h +251 -0
  20. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/buffer.h +108 -0
  21. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/dataset.h +332 -0
  22. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/dataset_definitions.h +1060 -0
  23. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/handle.h +111 -0
  24. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/meta_data.h +189 -0
  25. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/model.h +130 -0
  26. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/options.h +142 -0
  27. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/serialization.h +118 -0
  28. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c.h +36 -0
  29. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/basics.hpp +65 -0
  30. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/buffer.hpp +61 -0
  31. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/dataset.hpp +224 -0
  32. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/handle.hpp +108 -0
  33. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/meta_data.hpp +84 -0
  34. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/model.hpp +63 -0
  35. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/options.hpp +52 -0
  36. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/serialization.hpp +124 -0
  37. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/utils.hpp +81 -0
  38. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp.hpp +19 -0
  39. power_grid_model/_core/power_grid_model_c/lib/cmake/power_grid_model/power_grid_modelConfig.cmake +37 -0
  40. power_grid_model/_core/power_grid_model_c/lib/cmake/power_grid_model/power_grid_modelConfigVersion.cmake +65 -0
  41. power_grid_model/_core/power_grid_model_c/lib/cmake/power_grid_model/power_grid_modelTargets-release.cmake +19 -0
  42. power_grid_model/_core/power_grid_model_c/lib/cmake/power_grid_model/power_grid_modelTargets.cmake +144 -0
  43. power_grid_model/_core/power_grid_model_c/lib/power_grid_model_c.lib +0 -0
  44. power_grid_model/_core/power_grid_model_c/share/LICENSE +292 -0
  45. power_grid_model/_core/power_grid_model_c/share/README.md +15 -0
  46. power_grid_model/_core/serialization.py +319 -317
  47. power_grid_model/_core/typing.py +20 -0
  48. power_grid_model/{_utils.py → _core/utils.py} +798 -783
  49. power_grid_model/data_types.py +321 -319
  50. power_grid_model/enum.py +27 -214
  51. power_grid_model/errors.py +37 -123
  52. power_grid_model/typing.py +43 -48
  53. power_grid_model/utils.py +529 -400
  54. power_grid_model/validation/__init__.py +25 -14
  55. power_grid_model/validation/_rules.py +1167 -904
  56. power_grid_model/validation/_validation.py +1172 -980
  57. power_grid_model/validation/assertions.py +93 -92
  58. power_grid_model/validation/errors.py +602 -520
  59. power_grid_model/validation/utils.py +313 -318
  60. {power_grid_model-1.10.74.dist-info → power_grid_model-1.12.119.dist-info}/METADATA +162 -171
  61. power_grid_model-1.12.119.dist-info/RECORD +65 -0
  62. {power_grid_model-1.10.74.dist-info → power_grid_model-1.12.119.dist-info}/WHEEL +1 -1
  63. power_grid_model-1.12.119.dist-info/entry_points.txt +3 -0
  64. power_grid_model/_core/_power_grid_core.dll +0 -0
  65. power_grid_model-1.10.74.dist-info/RECORD +0 -32
  66. power_grid_model-1.10.74.dist-info/top_level.txt +0 -1
  67. {power_grid_model-1.10.74.dist-info → power_grid_model-1.12.119.dist-info}/licenses/LICENSE +0 -0
@@ -1,692 +1,1025 @@
1
- # SPDX-FileCopyrightText: Contributors to the Power Grid Model project <powergridmodel@lfenergy.org>
2
- #
3
- # SPDX-License-Identifier: MPL-2.0
4
-
5
- """
6
- Main power grid model class
7
- """
8
-
9
- from enum import IntEnum
10
- from typing import Type
11
-
12
- import numpy as np
13
-
14
- from power_grid_model._core.data_handling import (
15
- create_output_data,
16
- get_output_type,
17
- prepare_input_view,
18
- prepare_output_view,
19
- prepare_update_view,
20
- )
21
- from power_grid_model._core.dataset_definitions import (
22
- ComponentType,
23
- ComponentTypeLike,
24
- _map_to_component_types,
25
- _str_to_component_type,
26
- )
27
- from power_grid_model._core.error_handling import PowerGridBatchError, assert_no_error, handle_errors
28
- from power_grid_model._core.index_integer import IdNp, IdxNp
29
- from power_grid_model._core.options import Options
30
- from power_grid_model._core.power_grid_core import ConstDatasetPtr, IDPtr, IdxPtr, ModelPtr, power_grid_core as pgc
31
- from power_grid_model.data_types import Dataset, SingleDataset
32
- from power_grid_model.enum import (
33
- CalculationMethod,
34
- CalculationType,
35
- ShortCircuitVoltageScaling,
36
- TapChangingStrategy,
37
- _ExperimentalFeatures,
38
- )
39
- from power_grid_model.typing import ComponentAttributeMapping
40
-
41
-
42
- class PowerGridModel:
43
- """
44
- Main class for Power Grid Model
45
- """
46
-
47
- _model_ptr: ModelPtr
48
- _all_component_count: dict[ComponentType, int] | None
49
- _batch_error: PowerGridBatchError | None
50
-
51
- @property
52
- def batch_error(self) -> PowerGridBatchError | None:
53
- """
54
- Get the batch error object, if present, after a batch calculation with errors.
55
-
56
- Also works when continue_on_batch_error was set to True during the calculation.
57
-
58
- Returns:
59
- Batch error object, or None
60
- """
61
- return self._batch_error
62
-
63
- @property
64
- def _model(self):
65
- if not self._model_ptr:
66
- raise TypeError("You have an empty instance of PowerGridModel!")
67
- return self._model_ptr
68
-
69
- @property
70
- def all_component_count(self) -> dict[ComponentType, int]:
71
- """
72
- Get amount of elements per component type.
73
- If the count for a component type is zero, it will not be in the returned dictionary.
74
-
75
- Returns:
76
- A dictionary with
77
-
78
- - key: Component type name
79
- - value: Integer count of elements of this type
80
- """
81
- if self._all_component_count is None:
82
- raise TypeError("You have an empty instance of PowerGridModel!")
83
- return self._all_component_count
84
-
85
- def copy(self) -> "PowerGridModel":
86
- """
87
- Copy the current model
88
-
89
- Returns:
90
- A copy of PowerGridModel
91
- """
92
- new_model = PowerGridModel.__new__(PowerGridModel)
93
- new_model._model_ptr = pgc.copy_model(self._model) # pylint: disable=W0212
94
- assert_no_error()
95
- new_model._all_component_count = self._all_component_count # pylint: disable=W0212
96
- return new_model
97
-
98
- def __copy__(self):
99
- return self.copy()
100
-
101
- def __new__(cls, *_args, **_kwargs):
102
- instance = super().__new__(cls)
103
- instance._model_ptr = ModelPtr()
104
- instance._all_component_count = None
105
- return instance
106
-
107
- def __init__(self, input_data: SingleDataset, system_frequency: float = 50.0):
108
- """
109
- Initialize the model from an input data set.
110
-
111
- Args:
112
- input_data: Input data dictionary
113
-
114
- - key: Component type
115
- - value: Component data with the correct type :class:`SingleComponentData`
116
-
117
- system_frequency: Frequency of the power system, default 50 Hz
118
- """
119
- # destroy old instance
120
- pgc.destroy_model(self._model_ptr)
121
- self._all_component_count = None
122
- # create new
123
- prepared_input = prepare_input_view(_map_to_component_types(input_data))
124
- self._model_ptr = pgc.create_model(system_frequency, input_data=prepared_input.get_dataset_ptr())
125
- assert_no_error()
126
- self._all_component_count = {k: v for k, v in prepared_input.get_info().total_elements().items() if v > 0}
127
-
128
- def update(self, *, update_data: Dataset):
129
- """
130
- Update the model with changes.
131
-
132
- The model will be in an invalid state if the update fails and should be discarded.
133
-
134
- Args:
135
- update_data: Update data dictionary
136
-
137
- - key: Component type
138
- - value: Component data with the correct type :class:`ComponentData` (single scenario or batch)
139
-
140
- Raises:
141
- PowerGridError if the update fails. The model is left in an invalid state and should be discarded.
142
-
143
- Returns:
144
- None
145
- """
146
- prepared_update = prepare_update_view(_map_to_component_types(update_data))
147
- pgc.update_model(self._model, prepared_update.get_dataset_ptr())
148
- assert_no_error()
149
-
150
- def get_indexer(self, component_type: ComponentTypeLike, ids: np.ndarray):
151
- """
152
- Get array of indexers given array of ids for component type.
153
-
154
- This enables syntax like input_data[ComponentType.node][get_indexer(ids)]
155
-
156
- Args:
157
- component_type: Type of component
158
- ids: Array of ids
159
-
160
- Returns:
161
- Array of indexers, same shape as input array ids
162
- """
163
- component_type = _str_to_component_type(component_type)
164
- ids_c = np.ascontiguousarray(ids, dtype=IdNp).ctypes.data_as(IDPtr)
165
- indexer = np.empty_like(ids, dtype=IdxNp, order="C")
166
- indexer_c = indexer.ctypes.data_as(IdxPtr)
167
- size = ids.size
168
- # call c function
169
- pgc.get_indexer(self._model, component_type, size, ids_c, indexer_c)
170
- assert_no_error()
171
- return indexer
172
-
173
- def _get_output_component_count(self, calculation_type: CalculationType):
174
- exclude_types = {
175
- CalculationType.power_flow: [
176
- ComponentType.sym_voltage_sensor,
177
- ComponentType.asym_voltage_sensor,
178
- ComponentType.sym_power_sensor,
179
- ComponentType.asym_power_sensor,
180
- ComponentType.fault,
181
- ],
182
- CalculationType.state_estimation: [ComponentType.fault],
183
- CalculationType.short_circuit: [
184
- ComponentType.sym_voltage_sensor,
185
- ComponentType.asym_voltage_sensor,
186
- ComponentType.sym_power_sensor,
187
- ComponentType.asym_power_sensor,
188
- ],
189
- }.get(calculation_type, [])
190
-
191
- def include_type(component_type: ComponentType):
192
- for exclude_type in exclude_types:
193
- if exclude_type.value in component_type.value:
194
- return False
195
- return True
196
-
197
- return {ComponentType[k]: v for k, v in self.all_component_count.items() if include_type(k)}
198
-
199
- # pylint: disable=too-many-arguments
200
- def _construct_output( # pylint: disable=too-many-positional-arguments
201
- self,
202
- output_component_types: ComponentAttributeMapping,
203
- calculation_type: CalculationType,
204
- symmetric: bool,
205
- is_batch: bool,
206
- batch_size: int,
207
- ) -> dict[ComponentType, np.ndarray]:
208
- all_component_count = self._get_output_component_count(calculation_type=calculation_type)
209
- return create_output_data(
210
- output_component_types=output_component_types,
211
- output_type=get_output_type(calculation_type=calculation_type, symmetric=symmetric),
212
- all_component_count=all_component_count,
213
- is_batch=is_batch,
214
- batch_size=batch_size,
215
- )
216
-
217
- @staticmethod
218
- def _options(**kwargs) -> Options:
219
- def as_enum_value(key_enum: str, type_: Type[IntEnum]):
220
- if key_enum in kwargs:
221
- value_enum = kwargs[key_enum]
222
- if isinstance(value_enum, str):
223
- kwargs[key_enum] = type_[value_enum]
224
-
225
- as_enum_value("calculation_method", CalculationMethod)
226
- as_enum_value("tap_changing_strategy", TapChangingStrategy)
227
- as_enum_value("short_circuit_voltage_scaling", ShortCircuitVoltageScaling)
228
- as_enum_value("experimental_features", _ExperimentalFeatures)
229
-
230
- opt = Options()
231
- for key, value in kwargs.items():
232
- setattr(opt, key, value.value if isinstance(value, IntEnum) else value)
233
- return opt
234
-
235
- def _handle_errors(self, continue_on_batch_error: bool, batch_size: int, decode_error: bool):
236
- self._batch_error = handle_errors(
237
- continue_on_batch_error=continue_on_batch_error,
238
- batch_size=batch_size,
239
- decode_error=decode_error,
240
- )
241
-
242
- # pylint: disable=too-many-arguments
243
- def _calculate_impl( # pylint: disable=too-many-positional-arguments
244
- self,
245
- calculation_type: CalculationType,
246
- symmetric: bool,
247
- update_data: Dataset | None,
248
- output_component_types: ComponentAttributeMapping,
249
- options: Options,
250
- continue_on_batch_error: bool,
251
- decode_error: bool,
252
- experimental_features: _ExperimentalFeatures | str, # pylint: disable=too-many-arguments,unused-argument
253
- ):
254
- """
255
- Core calculation routine
256
-
257
- Args:
258
- calculation_type:
259
- symmetric:
260
- update_data:
261
- output_component_types:
262
- options:
263
- continue_on_batch_error:
264
- decode_error:
265
-
266
- Returns:
267
- """
268
- self._batch_error = None
269
- is_batch = update_data is not None
270
-
271
- if update_data is not None:
272
- prepared_update = prepare_update_view(update_data)
273
- update_ptr = prepared_update.get_dataset_ptr()
274
- batch_size = prepared_update.get_info().batch_size()
275
- else:
276
- update_ptr = ConstDatasetPtr()
277
- batch_size = 1
278
-
279
- output_data = self._construct_output(
280
- output_component_types=output_component_types,
281
- calculation_type=calculation_type,
282
- symmetric=symmetric,
283
- is_batch=is_batch,
284
- batch_size=batch_size,
285
- )
286
- prepared_result = prepare_output_view(
287
- output_data=output_data,
288
- output_type=get_output_type(calculation_type=calculation_type, symmetric=symmetric),
289
- )
290
-
291
- # run calculation
292
- pgc.calculate(
293
- # model and options
294
- self._model,
295
- options.opt,
296
- output_data=prepared_result.get_dataset_ptr(),
297
- update_data=update_ptr,
298
- )
299
-
300
- self._handle_errors(
301
- continue_on_batch_error=continue_on_batch_error,
302
- batch_size=batch_size,
303
- decode_error=decode_error,
304
- )
305
-
306
- return output_data
307
-
308
- def _calculate_power_flow(
309
- self,
310
- *,
311
- symmetric: bool = True,
312
- error_tolerance: float = 1e-8,
313
- max_iterations: int = 20,
314
- calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson,
315
- update_data: Dataset | None = None,
316
- threading: int = -1,
317
- output_component_types: ComponentAttributeMapping = None,
318
- continue_on_batch_error: bool = False,
319
- decode_error: bool = True,
320
- tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled,
321
- experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
322
- ):
323
- calculation_type = CalculationType.power_flow
324
- options = self._options(
325
- calculation_type=calculation_type,
326
- symmetric=symmetric,
327
- error_tolerance=error_tolerance,
328
- max_iterations=max_iterations,
329
- calculation_method=calculation_method,
330
- tap_changing_strategy=tap_changing_strategy,
331
- threading=threading,
332
- experimental_features=experimental_features,
333
- )
334
- return self._calculate_impl(
335
- calculation_type=calculation_type,
336
- symmetric=symmetric,
337
- update_data=update_data,
338
- output_component_types=output_component_types,
339
- options=options,
340
- continue_on_batch_error=continue_on_batch_error,
341
- decode_error=decode_error,
342
- experimental_features=experimental_features,
343
- )
344
-
345
- def _calculate_state_estimation(
346
- self,
347
- *,
348
- symmetric: bool = True,
349
- error_tolerance: float = 1e-8,
350
- max_iterations: int = 20,
351
- calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear,
352
- update_data: Dataset | None = None,
353
- threading: int = -1,
354
- output_component_types: ComponentAttributeMapping = None,
355
- continue_on_batch_error: bool = False,
356
- decode_error: bool = True,
357
- experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
358
- ) -> dict[ComponentType, np.ndarray]:
359
- calculation_type = CalculationType.state_estimation
360
- options = self._options(
361
- calculation_type=calculation_type,
362
- symmetric=symmetric,
363
- error_tolerance=error_tolerance,
364
- max_iterations=max_iterations,
365
- calculation_method=calculation_method,
366
- threading=threading,
367
- experimental_features=experimental_features,
368
- )
369
- return self._calculate_impl(
370
- calculation_type=calculation_type,
371
- symmetric=symmetric,
372
- update_data=update_data,
373
- output_component_types=output_component_types,
374
- options=options,
375
- continue_on_batch_error=continue_on_batch_error,
376
- decode_error=decode_error,
377
- experimental_features=experimental_features,
378
- )
379
-
380
- def _calculate_short_circuit(
381
- self,
382
- *,
383
- calculation_method: CalculationMethod | str = CalculationMethod.iec60909,
384
- update_data: Dataset | None = None,
385
- threading: int = -1,
386
- output_component_types: ComponentAttributeMapping = None,
387
- continue_on_batch_error: bool = False,
388
- decode_error: bool = True,
389
- short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum,
390
- experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
391
- ) -> dict[ComponentType, np.ndarray]:
392
- calculation_type = CalculationType.short_circuit
393
- symmetric = False
394
-
395
- options = self._options(
396
- calculation_type=calculation_type,
397
- symmetric=symmetric,
398
- calculation_method=calculation_method,
399
- threading=threading,
400
- short_circuit_voltage_scaling=short_circuit_voltage_scaling,
401
- experimental_features=experimental_features,
402
- )
403
- return self._calculate_impl(
404
- calculation_type=calculation_type,
405
- symmetric=symmetric,
406
- update_data=update_data,
407
- output_component_types=output_component_types,
408
- options=options,
409
- continue_on_batch_error=continue_on_batch_error,
410
- decode_error=decode_error,
411
- experimental_features=experimental_features,
412
- )
413
-
414
- def calculate_power_flow(
415
- self,
416
- *,
417
- symmetric: bool = True,
418
- error_tolerance: float = 1e-8,
419
- max_iterations: int = 20,
420
- calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson,
421
- update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
422
- threading: int = -1,
423
- output_component_types: ComponentAttributeMapping = None,
424
- continue_on_batch_error: bool = False,
425
- decode_error: bool = True,
426
- tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled,
427
- ) -> dict[ComponentType, np.ndarray]:
428
- """
429
- Calculate power flow once with the current model attributes.
430
- Or calculate in batch with the given update dataset in batch.
431
-
432
- Args:
433
- symmetric (bool, optional): Whether to perform a three-phase symmetric calculation.
434
-
435
- - True: Three-phase symmetric calculation, even for asymmetric loads/generations (Default).
436
- - False: Three-phase asymmetric calculation.
437
- error_tolerance (float, optional): Error tolerance for voltage in p.u., applicable only when the
438
- calculation method is iterative.
439
- max_iterations (int, optional): Maximum number of iterations, applicable only when the calculation method
440
- is iterative.
441
- calculation_method (an enumeration or string): The calculation method to use.
442
-
443
- - newton_raphson: Use Newton-Raphson iterative method (default).
444
- - linear: Use linear method.
445
- update_data (dict, optional):
446
- None: Calculate power flow once with the current model attributes.
447
- Or a dictionary for batch calculation with batch update.
448
-
449
- - key: Component type name to be updated in batch.
450
- - value:
451
-
452
- - For homogeneous update batch (a 2D numpy structured array):
453
-
454
- - Dimension 0: Each batch.
455
- - Dimension 1: Each updated element per batch for this component type.
456
- - For inhomogeneous update batch (a dictionary containing two keys):
457
-
458
- - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
459
- update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
460
- compressed sparse structure.
461
- https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
462
- - data: 1D numpy structured array in flat.
463
- threading (int, optional): Applicable only for batch calculation.
464
-
465
- - < 0: Sequential
466
- - = 0: Parallel, use number of hardware threads
467
- - > 0: Specify number of parallel threads
468
- output_component_types (ComponentAttributeMapping):
469
-
470
- - None: Row based data for all component types.
471
- - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
472
- - ComponentAttributeFilterOptions: Columnar data for all component types.
473
- - dict[ComponentType, set[str] | list[str] | None | ComponentAttributeFilterOptions]:
474
- key: ComponentType
475
- value:
476
- - None: Row based data for the specified component types.
477
- - ComponentAttributeFilterOptions: Columnar data for the specified component types.
478
- - set[str] | list[str]: Columnar data for the specified component types and attributes.
479
- continue_on_batch_error (bool, optional):
480
- Continue the program (instead of throwing error) if some scenarios fail.
481
- You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
482
- decode_error (bool, optional):
483
- Decode error messages to their derived types if possible.
484
-
485
- Returns:
486
- Dictionary of results of all components.
487
-
488
- - key: Component type name to be updated in batch.
489
- - value:
490
-
491
- - For single calculation: 1D numpy structured array for the results of this component type.
492
- - For batch calculation: 2D numpy structured array for the results of this component type.
493
-
494
- - Dimension 0: Each batch.
495
- - Dimension 1: The result of each element for this component type.
496
-
497
- Raises:
498
- Exception: In case an error in the core occurs, an exception will be thrown.
499
- """
500
- return self._calculate_power_flow(
501
- symmetric=symmetric,
502
- error_tolerance=error_tolerance,
503
- max_iterations=max_iterations,
504
- calculation_method=calculation_method,
505
- update_data=(_map_to_component_types(update_data) if update_data is not None else None),
506
- threading=threading,
507
- output_component_types=output_component_types,
508
- continue_on_batch_error=continue_on_batch_error,
509
- decode_error=decode_error,
510
- tap_changing_strategy=tap_changing_strategy,
511
- )
512
-
513
- def calculate_state_estimation(
514
- self,
515
- *,
516
- symmetric: bool = True,
517
- error_tolerance: float = 1e-8,
518
- max_iterations: int = 20,
519
- calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear,
520
- update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
521
- threading: int = -1,
522
- output_component_types: ComponentAttributeMapping = None,
523
- continue_on_batch_error: bool = False,
524
- decode_error: bool = True,
525
- ) -> dict[ComponentType, np.ndarray]:
526
- """
527
- Calculate state estimation once with the current model attributes.
528
- Or calculate in batch with the given update dataset in batch.
529
-
530
- Args:
531
- symmetric (bool, optional): Whether to perform a three-phase symmetric calculation.
532
-
533
- - True: Three-phase symmetric calculation, even for asymmetric loads/generations (Default).
534
- - False: Three-phase asymmetric calculation.
535
- error_tolerance (float, optional): error tolerance for voltage in p.u., only applicable when the
536
- calculation method is iterative.
537
- max_iterations (int, optional): Maximum number of iterations, applicable only when the calculation method
538
- is iterative.
539
- calculation_method (an enumeration): Use iterative linear method.
540
- update_data (dict, optional):
541
- None: Calculate state estimation once with the current model attributes.
542
- Or a dictionary for batch calculation with batch update.
543
-
544
- - key: Component type name to be updated in batch.
545
- - value:
546
-
547
- - For homogeneous update batch (a 2D numpy structured array):
548
-
549
- - Dimension 0: Each batch.
550
- - Dimension 1: Each updated element per batch for this component type.
551
- - For inhomogeneous update batch (a dictionary containing two keys):
552
-
553
- - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
554
- update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
555
- compressed sparse structure.
556
- https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
557
- - data: 1D numpy structured array in flat.
558
- threading (int, optional): Applicable only for batch calculation.
559
-
560
- - < 0: Sequential
561
- - = 0: Parallel, use number of hardware threads
562
- - > 0: Specify number of parallel threads
563
- output_component_types (ComponentAttributeMapping):
564
-
565
- - None: Row based data for all component types.
566
- - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
567
- - ComponentAttributeFilterOptions: Columnar data for all component types.
568
- - dict[ComponentType, set[str] | list[str] | None | ComponentAttributeFilterOptions]:
569
- key: ComponentType
570
- value:
571
- - None: Row based data for the specified component types.
572
- - ComponentAttributeFilterOptions: Columnar data for the specified component types.
573
- - set[str] | list[str]: Columnar data for the specified component types and attributes.
574
- continue_on_batch_error (bool, optional):
575
- Continue the program (instead of throwing error) if some scenarios fail.
576
- You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
577
- decode_error (bool, optional):
578
- Decode error messages to their derived types if possible.
579
-
580
- Returns:
581
- Dictionary of results of all components.
582
-
583
- - key: Component type name to be updated in batch.
584
- - value:
585
-
586
- - For single calculation: 1D numpy structured array for the results of this component type.
587
- - For batch calculation: 2D numpy structured array for the results of this component type.
588
-
589
- - Dimension 0: Each batch.
590
- - Dimension 1: The result of each element for this component type.
591
-
592
- Raises:
593
- Exception: In case an error in the core occurs, an exception will be thrown.
594
- """
595
- return self._calculate_state_estimation(
596
- symmetric=symmetric,
597
- error_tolerance=error_tolerance,
598
- max_iterations=max_iterations,
599
- calculation_method=calculation_method,
600
- update_data=(_map_to_component_types(update_data) if update_data is not None else None),
601
- threading=threading,
602
- output_component_types=output_component_types,
603
- continue_on_batch_error=continue_on_batch_error,
604
- decode_error=decode_error,
605
- )
606
-
607
- def calculate_short_circuit(
608
- self,
609
- *,
610
- calculation_method: CalculationMethod | str = CalculationMethod.iec60909,
611
- update_data: dict[str, np.ndarray | dict[str, np.ndarray]] | Dataset | None = None,
612
- threading: int = -1,
613
- output_component_types: ComponentAttributeMapping = None,
614
- continue_on_batch_error: bool = False,
615
- decode_error: bool = True,
616
- short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum,
617
- ) -> dict[ComponentType, np.ndarray]:
618
- """
619
- Calculate a short circuit once with the current model attributes.
620
- Or calculate in batch with the given update dataset in batch
621
-
622
- Args:
623
- calculation_method (an enumeration): Use the iec60909 standard.
624
- update_data:
625
- None: calculate a short circuit once with the current model attributes.
626
- Or a dictionary for batch calculation with batch update
627
-
628
- - key: Component type name to be updated in batch
629
- - value:
630
-
631
- - For homogeneous update batch (a 2D numpy structured array):
632
-
633
- - Dimension 0: each batch
634
- - Dimension 1: each updated element per batch for this component type
635
- - For inhomogeneous update batch (a dictionary containing two keys):
636
-
637
- - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
638
- update array for this batch is data[indptr[k]:indptr[k + 1]]. This is the concept of
639
- compressed sparse structure.
640
- https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
641
- - data: 1D numpy structured array in flat.
642
- threading (int, optional): Applicable only for batch calculation.
643
-
644
- - < 0: Sequential
645
- - = 0: Parallel, use number of hardware threads
646
- - > 0: Specify number of parallel threads
647
- output_component_types (ComponentAttributeMapping):
648
-
649
- - None: Row based data for all component types.
650
- - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
651
- - ComponentAttributeFilterOptions: Columnar data for all component types.
652
- - dict[ComponentType, set[str] | list[str] | None | ComponentAttributeFilterOptions]:
653
- key: ComponentType
654
- value:
655
- - None: Row based data for the specified component types.
656
- - ComponentAttributeFilterOptions: Columnar data for the specified component types.
657
- - set[str] | list[str]: Columnar data for the specified component types and attributes.
658
- continue_on_batch_error (bool, optional):
659
- Continue the program (instead of throwing error) if some scenarios fail.
660
- You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
661
- decode_error (bool, optional):
662
- Decode error messages to their derived types if possible.
663
- short_circuit_voltage_scaling ({ShortCircuitVoltageScaling, str}, optional):
664
- Whether to use the maximum or minimum voltage scaling.
665
- By default, the maximum voltage scaling is used to calculate the short circuit.
666
-
667
- Returns:
668
- Dictionary of results of all components.
669
-
670
- - key: Component type name to be updated in batch.
671
- - value:
672
-
673
- - For single calculation: 1D numpy structured array for the results of this component type.
674
- - For batch calculation: 2D numpy structured array for the results of this component type.
675
-
676
- - Dimension 0: Each batch.
677
- - Dimension 1: The result of each element for this component type.
678
- Raises:
679
- Exception: In case an error in the core occurs, an exception will be thrown.
680
- """
681
- return self._calculate_short_circuit(
682
- calculation_method=calculation_method,
683
- update_data=(_map_to_component_types(update_data) if update_data is not None else None),
684
- threading=threading,
685
- output_component_types=output_component_types,
686
- continue_on_batch_error=continue_on_batch_error,
687
- decode_error=decode_error,
688
- short_circuit_voltage_scaling=short_circuit_voltage_scaling,
689
- )
690
-
691
- def __del__(self):
692
- pgc.destroy_model(self._model_ptr)
1
+ # SPDX-FileCopyrightText: Contributors to the Power Grid Model project <powergridmodel@lfenergy.org>
2
+ #
3
+ # SPDX-License-Identifier: MPL-2.0
4
+
5
+ """
6
+ Main power grid model class
7
+ """
8
+
9
+ import itertools
10
+ from enum import IntEnum
11
+ from math import prod
12
+ from typing import Any, overload
13
+
14
+ import numpy as np
15
+
16
+ from power_grid_model._core.data_handling import (
17
+ create_output_data,
18
+ get_output_type,
19
+ prepare_input_view,
20
+ prepare_output_view,
21
+ prepare_update_view,
22
+ )
23
+ from power_grid_model._core.data_types import (
24
+ BatchDataset,
25
+ Dataset,
26
+ DenseBatchColumnarOutputDataset,
27
+ DenseBatchOutputDataset,
28
+ DenseBatchRowBasedOutputDataset,
29
+ SingleColumnarOutputDataset,
30
+ SingleDataset,
31
+ SingleOutputDataset,
32
+ SingleRowBasedDataset,
33
+ SingleRowBasedOutputDataset,
34
+ )
35
+ from power_grid_model._core.dataset_definitions import (
36
+ ComponentType,
37
+ ComponentTypeLike,
38
+ ComponentTypeVar,
39
+ _map_to_component_types,
40
+ _str_to_component_type,
41
+ )
42
+ from power_grid_model._core.enum import (
43
+ CalculationMethod,
44
+ CalculationType,
45
+ ComponentAttributeFilterOptions,
46
+ ShortCircuitVoltageScaling,
47
+ TapChangingStrategy,
48
+ _ExperimentalFeatures,
49
+ )
50
+ from power_grid_model._core.error_handling import PowerGridBatchError, assert_no_error, handle_errors
51
+ from power_grid_model._core.index_integer import IdNp, IdxNp
52
+ from power_grid_model._core.options import Options
53
+ from power_grid_model._core.power_grid_core import (
54
+ ConstDatasetPtr,
55
+ IDPtr,
56
+ IdxPtr,
57
+ ModelPtr,
58
+ get_power_grid_core as get_pgc,
59
+ )
60
+ from power_grid_model._core.typing import ComponentAttributeMapping, ComponentAttributeMappingDict
61
+
62
+
63
+ class PowerGridModel:
64
+ """
65
+ Main class for Power Grid Model
66
+ """
67
+
68
+ _model_ptr: ModelPtr
69
+ _all_component_count: dict[ComponentType, int] | None
70
+ _batch_error: PowerGridBatchError | None
71
+
72
+ @property
73
+ def batch_error(self) -> PowerGridBatchError | None:
74
+ """
75
+ Get the batch error object, if present, after a batch calculation with errors.
76
+
77
+ Also works when continue_on_batch_error was set to True during the calculation.
78
+
79
+ Returns:
80
+ Batch error object, or None
81
+ """
82
+ return self._batch_error
83
+
84
+ @property
85
+ def _model(self):
86
+ if not self._model_ptr:
87
+ raise TypeError("You have an empty instance of PowerGridModel!")
88
+ return self._model_ptr
89
+
90
+ @property
91
+ def all_component_count(self) -> dict[ComponentType, int]:
92
+ """
93
+ Get amount of elements per component type.
94
+ If the count for a component type is zero, it will not be in the returned dictionary.
95
+
96
+ Returns:
97
+ A dictionary with
98
+
99
+ - key: Component type name
100
+ - value: Integer count of elements of this type
101
+ """
102
+ if self._all_component_count is None:
103
+ raise TypeError("You have an empty instance of PowerGridModel!")
104
+ return self._all_component_count
105
+
106
+ def copy(self) -> "PowerGridModel":
107
+ """
108
+ Copy the current model
109
+
110
+ Returns:
111
+ A copy of PowerGridModel
112
+ """
113
+ new_model = PowerGridModel.__new__(PowerGridModel)
114
+ new_model._model_ptr = get_pgc().copy_model(self._model)
115
+ assert_no_error()
116
+ new_model._all_component_count = self._all_component_count
117
+ return new_model
118
+
119
+ def __copy__(self) -> "PowerGridModel":
120
+ return self.copy()
121
+
122
+ def __deepcopy__(self, memo: dict[int, Any]) -> "PowerGridModel":
123
+ # PowerGridModel.copy makes already a deepcopy
124
+ new_model = self.copy()
125
+
126
+ # memorize that this object (self) has been deepcopied
127
+ memo[id(self)] = new_model
128
+
129
+ return new_model
130
+
131
+ def __repr__(self) -> str:
132
+ """Return a string representation of the current model.
133
+
134
+ This includes the total number of components and the number of components per component type of the model.
135
+
136
+ Returns:
137
+ String representation of the model
138
+ """
139
+ try:
140
+ component_count = self.all_component_count
141
+ except TypeError:
142
+ component_count = {}
143
+
144
+ message = f"{self.__class__.__name__} ({sum(component_count.values())} components)\n"
145
+
146
+ for component_type, number in component_count.items():
147
+ message += f" - {component_type.value}: {number}\n"
148
+
149
+ return message
150
+
151
+ def __new__(cls, *_args, **_kwargs):
152
+ instance = super().__new__(cls)
153
+ instance._model_ptr = ModelPtr()
154
+ instance._all_component_count = None
155
+ return instance
156
+
157
+ def __init__(self, input_data: SingleDataset, system_frequency: float = 50.0):
158
+ """
159
+ Initialize the model from an input data set.
160
+
161
+ Args:
162
+ input_data: Input data dictionary
163
+
164
+ - key: Component type
165
+ - value: Component data with the correct type :class:`SingleComponentData`
166
+
167
+ system_frequency: Frequency of the power system, default 50 Hz
168
+ """
169
+ # destroy old instance
170
+ get_pgc().destroy_model(self._model_ptr)
171
+ self._all_component_count = None
172
+ # create new
173
+ prepared_input = prepare_input_view(_map_to_component_types(input_data))
174
+ self._model_ptr = get_pgc().create_model(system_frequency, input_data=prepared_input.get_dataset_ptr())
175
+ assert_no_error()
176
+ self._all_component_count = {k: v for k, v in prepared_input.get_info().total_elements().items() if v > 0}
177
+
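As a usage illustration only, a minimal sketch of building an input dataset and constructing the model as defined above. The `initialize_array` helper, the `LoadGenType` enum and the component attribute names (`u_rated`, `from_node`, `p_specified`, ...) come from the library's documented input format and are not part of this diff; the ids and values are arbitrary.

    from power_grid_model import LoadGenType, PowerGridModel, initialize_array

    # two nodes joined by one line, fed from a source, with a single symmetric load
    node = initialize_array("input", "node", 2)
    node["id"] = [1, 2]
    node["u_rated"] = [10.5e3, 10.5e3]

    line = initialize_array("input", "line", 1)
    line["id"] = [3]
    line["from_node"] = [1]
    line["to_node"] = [2]
    line["from_status"] = [1]
    line["to_status"] = [1]
    line["r1"] = [0.25]
    line["x1"] = [0.2]
    line["c1"] = [10e-6]
    line["tan1"] = [0.0]

    source = initialize_array("input", "source", 1)
    source["id"] = [4]
    source["node"] = [1]
    source["status"] = [1]
    source["u_ref"] = [1.0]

    sym_load = initialize_array("input", "sym_load", 1)
    sym_load["id"] = [5]
    sym_load["node"] = [2]
    sym_load["status"] = [1]
    sym_load["type"] = [LoadGenType.const_power]
    sym_load["p_specified"] = [1e6]
    sym_load["q_specified"] = [0.2e6]

    input_data = {"node": node, "line": line, "source": source, "sym_load": sym_load}
    model = PowerGridModel(input_data, system_frequency=50.0)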
178
+ def update(self, *, update_data: Dataset):
179
+ """
180
+ Update the model with changes.
181
+
182
+ The model will be in an invalid state if the update fails and should be discarded.
183
+
184
+ Args:
185
+ update_data: Update data dictionary
186
+
187
+ - key: Component type
188
+ - value: Component data with the correct type :class:`ComponentData` (single scenario or batch)
189
+
190
+ Raises:
191
+ PowerGridError if the update fails. The model is left in an invalid state and should be discarded.
192
+
193
+ Returns:
194
+ None
195
+ """
196
+ prepared_update = prepare_update_view(_map_to_component_types(update_data))
197
+ get_pgc().update_model(self._model, prepared_update.get_dataset_ptr())
198
+ assert_no_error()
199
+
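Continuing the sketch above, an in-place update could look like the following; the `sym_load` update attributes (`id`, `status`) follow the library's documented update format rather than this diff.

    # switch the load off permanently; the change persists in the model
    load_update = initialize_array("update", "sym_load", 1)
    load_update["id"] = [5]
    load_update["status"] = [0]
    model.update(update_data={"sym_load": load_update})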
200
+ def get_indexer(self, component_type: ComponentTypeLike, ids: np.ndarray):
201
+ """
202
+ Get array of indexers given array of ids for component type.
203
+
204
+ This enables syntax like input_data[ComponentType.node][get_indexer(ids)]
205
+
206
+ Args:
207
+ component_type: Type of component
208
+ ids: Array of ids
209
+
210
+ Returns:
211
+ Array of indexers, same shape as input array ids
212
+ """
213
+ component_type = _str_to_component_type(component_type)
214
+ ids_c = np.ascontiguousarray(ids, dtype=IdNp).ctypes.data_as(IDPtr)
215
+ indexer = np.empty_like(ids, dtype=IdxNp, order="C")
216
+ indexer_c = indexer.ctypes.data_as(IdxPtr)
217
+ size = ids.size
218
+ # call c function
219
+ get_pgc().get_indexer(self._model, component_type, size, ids_c, indexer_c)
220
+ assert_no_error()
221
+ return indexer
222
+
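A small sketch of the indexer round-trip described in the docstring, re-using `input_data` and `model` from the earlier sketch (the ids are arbitrary):

    import numpy as np

    from power_grid_model import ComponentType

    ids = np.array([2, 1])
    idx = model.get_indexer(ComponentType.node, ids)
    # idx holds the positions of those ids in the node input array, so
    # input_data["node"][idx]["u_rated"] returns the rated voltages in the order of ids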
223
+ def _get_output_component_count(self, calculation_type: CalculationType):
224
+ exclude_types = {
225
+ CalculationType.power_flow: [
226
+ ComponentType.sym_voltage_sensor,
227
+ ComponentType.asym_voltage_sensor,
228
+ ComponentType.sym_power_sensor,
229
+ ComponentType.asym_power_sensor,
230
+ ComponentType.fault,
231
+ ],
232
+ CalculationType.state_estimation: [ComponentType.fault],
233
+ CalculationType.short_circuit: [
234
+ ComponentType.sym_voltage_sensor,
235
+ ComponentType.asym_voltage_sensor,
236
+ ComponentType.sym_power_sensor,
237
+ ComponentType.asym_power_sensor,
238
+ ],
239
+ }.get(calculation_type, [])
240
+
241
+ def include_type(component_type: ComponentType):
242
+ return all(exclude_type.value not in component_type.value for exclude_type in exclude_types)
243
+
244
+ return {ComponentType[k]: v for k, v in self.all_component_count.items() if include_type(k)}
245
+
246
+ def _construct_output(
247
+ self,
248
+ output_component_types: ComponentAttributeMapping,
249
+ calculation_type: CalculationType,
250
+ symmetric: bool,
251
+ is_batch: bool,
252
+ batch_size: int,
253
+ ):
254
+ all_component_count = self._get_output_component_count(calculation_type=calculation_type)
255
+ return create_output_data(
256
+ output_component_types=output_component_types,
257
+ output_type=get_output_type(calculation_type=calculation_type, symmetric=symmetric),
258
+ all_component_count=all_component_count,
259
+ is_batch=is_batch,
260
+ batch_size=batch_size,
261
+ )
262
+
263
+ @staticmethod
264
+ def _options(**kwargs) -> Options:
265
+ def as_enum_value(key_enum: str, type_: type[IntEnum]):
266
+ if key_enum in kwargs:
267
+ value_enum = kwargs[key_enum]
268
+ if isinstance(value_enum, str):
269
+ kwargs[key_enum] = type_[value_enum] # NOSONAR(S5864) IntEnum has __getitem__
270
+
271
+ as_enum_value("calculation_method", CalculationMethod)
272
+ as_enum_value("tap_changing_strategy", TapChangingStrategy)
273
+ as_enum_value("short_circuit_voltage_scaling", ShortCircuitVoltageScaling)
274
+ as_enum_value("experimental_features", _ExperimentalFeatures)
275
+
276
+ opt = Options()
277
+ for key, value in kwargs.items():
278
+ setattr(opt, key, value.value if isinstance(value, IntEnum) else value)
279
+ return opt
280
+
281
+ def _handle_errors(self, continue_on_batch_error: bool, batch_size: int, decode_error: bool):
282
+ self._batch_error = handle_errors(
283
+ continue_on_batch_error=continue_on_batch_error,
284
+ batch_size=batch_size,
285
+ decode_error=decode_error,
286
+ )
287
+
288
+ def _calculate_impl( # noqa: PLR0913
289
+ self,
290
+ calculation_type: CalculationType,
291
+ symmetric: bool,
292
+ update_data: Dataset | list[Dataset] | None,
293
+ output_component_types: ComponentAttributeMapping,
294
+ options: Options,
295
+ continue_on_batch_error: bool,
296
+ decode_error: bool,
297
+ experimental_features: _ExperimentalFeatures | str, # NOSONAR # noqa: ARG002
298
+ ) -> Dataset:
299
+ """
300
+ Core calculation routine
301
+
302
+ Args:
303
+ calculation_type:
304
+ symmetric:
305
+ update_data:
306
+ output_component_types:
307
+ options:
308
+ continue_on_batch_error:
309
+ decode_error:
310
+
311
+ Returns:
312
+ """
313
+ self._batch_error = None
314
+ if update_data is None:
315
+ is_batch = False
316
+ update_data = []
317
+ else:
318
+ is_batch = True
319
+ if not isinstance(update_data, list):
320
+ update_data = [update_data]
321
+ update_data = [_map_to_component_types(x) for x in update_data]
322
+ prepared_update = [prepare_update_view(x) for x in update_data]
323
+ for this_dataset, next_dataset in itertools.pairwise(prepared_update):
324
+ this_dataset.set_next_cartesian_product_dimension(next_dataset)
325
+ update_ptr: ConstDatasetPtr = prepared_update[0].get_dataset_ptr() if prepared_update else ConstDatasetPtr()
326
+ batch_size = prod(x.get_info().batch_size() for x in prepared_update)
327
+
328
+ output_data = self._construct_output(
329
+ output_component_types=output_component_types,
330
+ calculation_type=calculation_type,
331
+ symmetric=symmetric,
332
+ is_batch=is_batch,
333
+ batch_size=batch_size,
334
+ )
335
+ prepared_result = prepare_output_view(
336
+ output_data=output_data,
337
+ output_type=get_output_type(calculation_type=calculation_type, symmetric=symmetric),
338
+ )
339
+
340
+ # run calculation
341
+ get_pgc().calculate(
342
+ # model and options
343
+ self._model,
344
+ options.opt,
345
+ output_data=prepared_result.get_dataset_ptr(),
346
+ update_data=update_ptr,
347
+ )
348
+
349
+ self._handle_errors(
350
+ continue_on_batch_error=continue_on_batch_error,
351
+ batch_size=batch_size,
352
+ decode_error=decode_error,
353
+ )
354
+
355
+ return output_data
356
+
357
+ def _calculate_power_flow( # noqa: PLR0913
358
+ self,
359
+ *,
360
+ symmetric: bool = True,
361
+ error_tolerance: float = 1e-8,
362
+ max_iterations: int = 20,
363
+ calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson,
364
+ update_data: Dataset | list[Dataset] | None = None,
365
+ threading: int = -1,
366
+ output_component_types: ComponentAttributeMapping = None,
367
+ continue_on_batch_error: bool = False,
368
+ decode_error: bool = True,
369
+ tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled,
370
+ experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
371
+ ) -> Dataset:
372
+ calculation_type = CalculationType.power_flow
373
+ options = self._options(
374
+ calculation_type=calculation_type,
375
+ symmetric=symmetric,
376
+ error_tolerance=error_tolerance,
377
+ max_iterations=max_iterations,
378
+ calculation_method=calculation_method,
379
+ tap_changing_strategy=tap_changing_strategy,
380
+ threading=threading,
381
+ experimental_features=experimental_features,
382
+ )
383
+ return self._calculate_impl(
384
+ calculation_type=calculation_type,
385
+ symmetric=symmetric,
386
+ update_data=update_data,
387
+ output_component_types=output_component_types,
388
+ options=options,
389
+ continue_on_batch_error=continue_on_batch_error,
390
+ decode_error=decode_error,
391
+ experimental_features=experimental_features,
392
+ )
393
+
394
+ def _calculate_state_estimation( # noqa: PLR0913
395
+ self,
396
+ *,
397
+ symmetric: bool = True,
398
+ error_tolerance: float = 1e-8,
399
+ max_iterations: int = 20,
400
+ calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear,
401
+ update_data: Dataset | list[Dataset] | None = None,
402
+ threading: int = -1,
403
+ output_component_types: ComponentAttributeMapping = None,
404
+ continue_on_batch_error: bool = False,
405
+ decode_error: bool = True,
406
+ experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
407
+ ) -> Dataset:
408
+ calculation_type = CalculationType.state_estimation
409
+ options = self._options(
410
+ calculation_type=calculation_type,
411
+ symmetric=symmetric,
412
+ error_tolerance=error_tolerance,
413
+ max_iterations=max_iterations,
414
+ calculation_method=calculation_method,
415
+ threading=threading,
416
+ experimental_features=experimental_features,
417
+ )
418
+ return self._calculate_impl(
419
+ calculation_type=calculation_type,
420
+ symmetric=symmetric,
421
+ update_data=update_data,
422
+ output_component_types=output_component_types,
423
+ options=options,
424
+ continue_on_batch_error=continue_on_batch_error,
425
+ decode_error=decode_error,
426
+ experimental_features=experimental_features,
427
+ )
428
+
429
+ def _calculate_short_circuit( # noqa: PLR0913
430
+ self,
431
+ *,
432
+ calculation_method: CalculationMethod | str = CalculationMethod.iec60909,
433
+ update_data: Dataset | list[Dataset] | None = None,
434
+ threading: int = -1,
435
+ output_component_types: ComponentAttributeMapping = None,
436
+ continue_on_batch_error: bool = False,
437
+ decode_error: bool = True,
438
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum,
439
+ experimental_features: _ExperimentalFeatures | str = _ExperimentalFeatures.disabled,
440
+ ) -> Dataset:
441
+ calculation_type = CalculationType.short_circuit
442
+ symmetric = False
443
+
444
+ options = self._options(
445
+ calculation_type=calculation_type,
446
+ symmetric=symmetric,
447
+ calculation_method=calculation_method,
448
+ threading=threading,
449
+ short_circuit_voltage_scaling=short_circuit_voltage_scaling,
450
+ experimental_features=experimental_features,
451
+ )
452
+ return self._calculate_impl(
453
+ calculation_type=calculation_type,
454
+ symmetric=symmetric,
455
+ update_data=update_data,
456
+ output_component_types=output_component_types,
457
+ options=options,
458
+ continue_on_batch_error=continue_on_batch_error,
459
+ decode_error=decode_error,
460
+ experimental_features=experimental_features,
461
+ )
462
+
463
+ @overload
464
+ def calculate_power_flow(
465
+ self,
466
+ *,
467
+ symmetric: bool = ...,
468
+ error_tolerance: float = ...,
469
+ max_iterations: int = ...,
470
+ calculation_method: CalculationMethod | str = ...,
471
+ threading: int = ...,
472
+ output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ...,
473
+ continue_on_batch_error: bool = ...,
474
+ decode_error: bool = ...,
475
+ tap_changing_strategy: TapChangingStrategy | str = ...,
476
+ ) -> SingleRowBasedDataset: ...
477
+ @overload
478
+ def calculate_power_flow(
479
+ self,
480
+ *,
481
+ symmetric: bool = ...,
482
+ error_tolerance: float = ...,
483
+ max_iterations: int = ...,
484
+ calculation_method: CalculationMethod | str = ...,
485
+ update_data: None = ...,
486
+ threading: int = ...,
487
+ output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ...,
488
+ continue_on_batch_error: bool = ...,
489
+ decode_error: bool = ...,
490
+ tap_changing_strategy: TapChangingStrategy | str = ...,
491
+ ) -> SingleRowBasedDataset: ...
492
+ @overload
493
+ def calculate_power_flow(
494
+ self,
495
+ *,
496
+ symmetric: bool = ...,
497
+ error_tolerance: float = ...,
498
+ max_iterations: int = ...,
499
+ calculation_method: CalculationMethod | str = ...,
500
+ update_data: None = ...,
501
+ threading: int = ...,
502
+ output_component_types: ComponentAttributeFilterOptions = ...,
503
+ continue_on_batch_error: bool = ...,
504
+ decode_error: bool = ...,
505
+ tap_changing_strategy: TapChangingStrategy | str = ...,
506
+ ) -> SingleColumnarOutputDataset: ...
507
+ @overload
508
+ def calculate_power_flow(
509
+ self,
510
+ *,
511
+ symmetric: bool = ...,
512
+ error_tolerance: float = ...,
513
+ max_iterations: int = ...,
514
+ calculation_method: CalculationMethod | str = ...,
515
+ update_data: None = ...,
516
+ threading: int = ...,
517
+ output_component_types: ComponentAttributeMappingDict = ...,
518
+ continue_on_batch_error: bool = ...,
519
+ decode_error: bool = ...,
520
+ tap_changing_strategy: TapChangingStrategy | str = ...,
521
+ ) -> SingleOutputDataset: ...
522
+ @overload
523
+ def calculate_power_flow(
524
+ self,
525
+ *,
526
+ symmetric: bool = ...,
527
+ error_tolerance: float = ...,
528
+ max_iterations: int = ...,
529
+ calculation_method: CalculationMethod | str = ...,
530
+ update_data: BatchDataset | list[BatchDataset] = ...,
531
+ threading: int = ...,
532
+ output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ...,
533
+ continue_on_batch_error: bool = ...,
534
+ decode_error: bool = ...,
535
+ tap_changing_strategy: TapChangingStrategy | str = ...,
536
+ ) -> DenseBatchRowBasedOutputDataset: ...
537
+ @overload
538
+ def calculate_power_flow(
539
+ self,
540
+ *,
541
+ symmetric: bool = ...,
542
+ error_tolerance: float = ...,
543
+ max_iterations: int = ...,
544
+ calculation_method: CalculationMethod | str = ...,
545
+ update_data: BatchDataset | list[BatchDataset] = ...,
546
+ threading: int = ...,
547
+ output_component_types: ComponentAttributeFilterOptions = ...,
548
+ continue_on_batch_error: bool = ...,
549
+ decode_error: bool = ...,
550
+ tap_changing_strategy: TapChangingStrategy | str = ...,
551
+ ) -> DenseBatchColumnarOutputDataset: ...
552
+ @overload
553
+ def calculate_power_flow(
554
+ self,
555
+ *,
556
+ symmetric: bool = ...,
557
+ error_tolerance: float = ...,
558
+ max_iterations: int = ...,
559
+ calculation_method: CalculationMethod | str = ...,
560
+ update_data: BatchDataset | list[BatchDataset] = ...,
561
+ threading: int = ...,
562
+ output_component_types: ComponentAttributeMappingDict = ...,
563
+ continue_on_batch_error: bool = ...,
564
+ decode_error: bool = ...,
565
+ tap_changing_strategy: TapChangingStrategy | str = ...,
566
+ ) -> DenseBatchOutputDataset: ...
567
+ def calculate_power_flow( # noqa: PLR0913
568
+ self,
569
+ *,
570
+ symmetric: bool = True,
571
+ error_tolerance: float = 1e-8,
572
+ max_iterations: int = 20,
573
+ calculation_method: CalculationMethod | str = CalculationMethod.newton_raphson,
574
+ update_data: BatchDataset | list[BatchDataset] | None = None,
575
+ threading: int = -1,
576
+ output_component_types: ComponentAttributeMapping = None,
577
+ continue_on_batch_error: bool = False,
578
+ decode_error: bool = True,
579
+ tap_changing_strategy: TapChangingStrategy | str = TapChangingStrategy.disabled,
+ ) -> Dataset:
+ """
+ Calculate power flow once with the current model attributes.
+ Or calculate in batch with the given update dataset.
+
+ Args:
+ symmetric (bool, optional): Whether to perform a three-phase symmetric calculation.
+
+ - True: Three-phase symmetric calculation, even for asymmetric loads/generations (Default).
+ - False: Three-phase asymmetric calculation.
+ error_tolerance (float, optional): Error tolerance for voltage in p.u., applicable only when the
+ calculation method is iterative.
+ max_iterations (int, optional): Maximum number of iterations, applicable only when the calculation method
+ is iterative.
+ calculation_method (an enumeration or string): The calculation method to use.
+
+ - newton_raphson: Use Newton-Raphson iterative method (default).
+ - linear: Use linear method.
+ update_data (dict, list of dict, optional):
+ None: Calculate power flow once with the current model attributes.
+
+ Or a dictionary for batch calculation with batch update.
+
+ - key: Component type name to be updated in batch.
+ - value:
+
+ - For homogeneous update batch (a 2D numpy structured array):
+
+ - Dimension 0: Each batch.
+ - Dimension 1: Each updated element per batch for this component type.
+ - For inhomogeneous update batch (a dictionary containing two keys):
+
+ - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
+ update array for this batch is data[indptr[k]:indptr[k + 1]]. This follows the concept of a
+ compressed sparse structure.
+ https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
+ - data: A flat 1D numpy structured array.
+ Or a list of such dictionaries (batch datasets) to represent multiple dimensions of a cartesian product.
+ The calculation core will interpret these datasets as a cartesian product of all the scenarios.
+ Each batch dataset in the list represents one dimension of the cartesian product.
+ The output will then have row size equal to the product of the batch sizes of all these datasets,
+ in 1D flat structure.
+ E.g., if you have three batch datasets with batch sizes 2, 3, and 4 respectively,
+ and the number of nodes is 5, the final output for nodes will have shape (2*3*4, 5).
+ (An illustrative sketch of the dense and sparse batch formats follows this method.)
+ threading (int, optional): Applicable only for batch calculation.
+
+ - < 0: Sequential
+ - = 0: Parallel, use number of hardware threads
+ - > 0: Specify number of parallel threads
+ output_component_types (ComponentAttributeMapping):
+
+ - None: Row based data for all component types.
+ - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
+ - ComponentAttributeFilterOptions: Columnar data for all component types.
+ - ComponentAttributeMappingDict:
+ key: ComponentType
+ value:
+ - None: Row based data for the specified component types.
+ - ComponentAttributeFilterOptions: Columnar data for the specified component types.
+ - set[str] | list[str]: Columnar data for the specified component types and attributes.
+ continue_on_batch_error (bool, optional):
+ Continue the program (instead of throwing an error) if some scenarios fail.
+ You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
+ decode_error (bool, optional):
+ Decode error messages to their derived types if possible.
+ tap_changing_strategy (an enumeration or string, optional):
+ The strategy for automatic tap changing of transformers (default: disabled).
+
+ Returns:
+ Dictionary of results of all components.
+
+ - key: Component type name.
+ - value:
+
+ - For single calculation: 1D numpy structured array for the results of this component type.
+ - For batch calculation: 2D numpy structured array for the results of this component type.
+
+ - Dimension 0: Each batch.
+ - Dimension 1: The result of each element for this component type.
+
+ Raises:
+ Exception: In case an error in the core occurs, an exception will be thrown.
+ """
+ return self._calculate_power_flow(
+ symmetric=symmetric,
+ error_tolerance=error_tolerance,
+ max_iterations=max_iterations,
+ calculation_method=calculation_method,
+ update_data=update_data,
+ threading=threading,
+ output_component_types=output_component_types,
+ continue_on_batch_error=continue_on_batch_error,
+ decode_error=decode_error,
+ tap_changing_strategy=tap_changing_strategy,
+ )
+
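To make the dense and sparse batch formats described above concrete, here is a minimal usage sketch (editorial illustration, not part of the package diff). All component ids and load values are invented for the example; the grid is a single node with one source and two symmetric loads, which is assumed to be enough for a power flow calculation.

import numpy as np

from power_grid_model import LoadGenType, PowerGridModel, initialize_array

# Minimal single-node grid: one source and two symmetric loads (example ids only).
node = initialize_array("input", "node", 1)
node["id"] = [1]
node["u_rated"] = [10.5e3]

source = initialize_array("input", "source", 1)
source["id"] = [2]
source["node"] = [1]
source["status"] = [1]
source["u_ref"] = [1.0]

sym_load = initialize_array("input", "sym_load", 2)
sym_load["id"] = [4, 5]
sym_load["node"] = [1, 1]
sym_load["status"] = [1, 1]
sym_load["type"] = [LoadGenType.const_power] * 2
sym_load["p_specified"] = [1.0e6, 2.0e6]
sym_load["q_specified"] = [0.2e6, 0.4e6]

model = PowerGridModel(input_data={"node": node, "source": source, "sym_load": sym_load})

# Homogeneous (dense) batch: 3 scenarios, both loads updated in every scenario.
dense_update = initialize_array("update", "sym_load", (3, 2))
dense_update["id"] = [[4, 5]] * 3
dense_update["p_specified"] = [[1.0e6, 2.0e6], [1.5e6, 2.5e6], [2.0e6, 3.0e6]]

dense_result = model.calculate_power_flow(update_data={"sym_load": dense_update})
print(dense_result["node"]["u_pu"].shape)  # (3, 1): dimension 0 = scenario, dimension 1 = node

# Inhomogeneous (sparse) batch: scenario 0 updates one load, scenario 1 updates both.
flat_update = initialize_array("update", "sym_load", 3)
flat_update["id"] = [4, 4, 5]
flat_update["p_specified"] = [1.2e6, 1.8e6, 2.2e6]
sparse_result = model.calculate_power_flow(
    update_data={
        "sym_load": {
            # scenario k uses data[indptr[k]:indptr[k + 1]]
            "indptr": np.array([0, 1, 3], dtype=np.int64),
            "data": flat_update,
        }
    }
)
print(sparse_result["node"]["u_pu"].shape)  # (2, 1): two scenarios

Attributes left untouched in an update array keep their sentinel "empty" value and are therefore not updated, which is why only id and p_specified are set here.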
+ @overload
+ def calculate_state_estimation(
+ self,
+ *,
+ symmetric: bool = ...,
+ error_tolerance: float = ...,
+ max_iterations: int = ...,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: None = ...,
+ threading: int = ...,
+ output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ ) -> SingleRowBasedOutputDataset: ...
+ @overload
+ def calculate_state_estimation(
+ self,
+ *,
+ symmetric: bool = ...,
+ error_tolerance: float = ...,
+ max_iterations: int = ...,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: None = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeFilterOptions = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ ) -> SingleColumnarOutputDataset: ...
+ @overload
+ def calculate_state_estimation(
+ self,
+ *,
+ symmetric: bool = ...,
+ error_tolerance: float = ...,
+ max_iterations: int = ...,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: None = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeMappingDict = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ ) -> SingleOutputDataset: ...
+ @overload
+ def calculate_state_estimation(
+ self,
+ *,
+ symmetric: bool = ...,
+ error_tolerance: float = ...,
+ max_iterations: int = ...,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: BatchDataset | list[BatchDataset] = ...,
+ threading: int = ...,
+ output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ ) -> DenseBatchRowBasedOutputDataset: ...
+ @overload
+ def calculate_state_estimation(
+ self,
+ *,
+ symmetric: bool = ...,
+ error_tolerance: float = ...,
+ max_iterations: int = ...,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: BatchDataset | list[BatchDataset] = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeFilterOptions = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ ) -> DenseBatchColumnarOutputDataset: ...
+ @overload
+ def calculate_state_estimation(
+ self,
+ *,
+ symmetric: bool = ...,
+ error_tolerance: float = ...,
+ max_iterations: int = ...,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: BatchDataset | list[BatchDataset] = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeMappingDict = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ ) -> DenseBatchOutputDataset: ...
+ def calculate_state_estimation( # noqa: PLR0913
+ self,
+ *,
+ symmetric: bool = True,
+ error_tolerance: float = 1e-8,
+ max_iterations: int = 20,
+ calculation_method: CalculationMethod | str = CalculationMethod.iterative_linear,
+ update_data: BatchDataset | list[BatchDataset] | None = None,
+ threading: int = -1,
+ output_component_types: ComponentAttributeMapping = None,
+ continue_on_batch_error: bool = False,
+ decode_error: bool = True,
+ ) -> Dataset:
+ """
+ Calculate state estimation once with the current model attributes.
+ Or calculate in batch with the given update dataset.
+
+ Args:
+ symmetric (bool, optional): Whether to perform a three-phase symmetric calculation.
+
+ - True: Three-phase symmetric calculation, even for asymmetric loads/generations (Default).
+ - False: Three-phase asymmetric calculation.
+ error_tolerance (float, optional): Error tolerance for voltage in p.u., applicable only when the
+ calculation method is iterative.
+ max_iterations (int, optional): Maximum number of iterations, applicable only when the calculation method
+ is iterative.
+ calculation_method (an enumeration or string): The calculation method to use; the iterative linear
+ method (iterative_linear) is used by default.
+ update_data (dict, list of dict, optional):
+ None: Calculate state estimation once with the current model attributes.
+
+ Or a dictionary for batch calculation with batch update.
+
+ - key: Component type name to be updated in batch.
+ - value:
+
+ - For homogeneous update batch (a 2D numpy structured array):
+
+ - Dimension 0: Each batch.
+ - Dimension 1: Each updated element per batch for this component type.
+ - For inhomogeneous update batch (a dictionary containing two keys):
+
+ - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
+ update array for this batch is data[indptr[k]:indptr[k + 1]]. This follows the concept of a
+ compressed sparse structure.
+ https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
+ - data: A flat 1D numpy structured array.
+ Or a list of such dictionaries (batch datasets) to represent multiple dimensions of a cartesian product.
+ The calculation core will interpret these datasets as a cartesian product of all the scenarios.
+ Each batch dataset in the list represents one dimension of the cartesian product.
+ The output will then have row size equal to the product of the batch sizes of all these datasets,
+ in 1D flat structure.
+ E.g., if you have three batch datasets with batch sizes 2, 3, and 4 respectively,
+ and the number of nodes is 5, the final output for nodes will have shape (2*3*4, 5).
+ threading (int, optional): Applicable only for batch calculation.
+
+ - < 0: Sequential
+ - = 0: Parallel, use number of hardware threads
+ - > 0: Specify number of parallel threads
+ output_component_types (ComponentAttributeMapping):
+
+ - None: Row based data for all component types.
+ - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
+ - ComponentAttributeFilterOptions: Columnar data for all component types.
+ - ComponentAttributeMappingDict:
+ key: ComponentType
+ value:
+ - None: Row based data for the specified component types.
+ - ComponentAttributeFilterOptions: Columnar data for the specified component types.
+ - set[str] | list[str]: Columnar data for the specified component types and attributes.
+ (An illustrative output selection sketch follows this method.)
+ continue_on_batch_error (bool, optional):
+ Continue the program (instead of throwing an error) if some scenarios fail.
+ You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
+ decode_error (bool, optional):
+ Decode error messages to their derived types if possible.
+
+ Returns:
+ Dictionary of results of all components.
+
+ - key: Component type name.
+ - value:
+
+ - For single calculation: 1D numpy structured array for the results of this component type.
+ - For batch calculation: 2D numpy structured array for the results of this component type.
+
+ - Dimension 0: Each batch.
+ - Dimension 1: The result of each element for this component type.
+
+ Raises:
+ Exception: In case an error in the core occurs, an exception will be thrown.
+ """
+ return self._calculate_state_estimation(
+ symmetric=symmetric,
+ error_tolerance=error_tolerance,
+ max_iterations=max_iterations,
+ calculation_method=calculation_method,
+ update_data=update_data,
+ threading=threading,
+ output_component_types=output_component_types,
+ continue_on_batch_error=continue_on_batch_error,
+ decode_error=decode_error,
+ )
+
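The output_component_types mapping documented above is shared by calculate_power_flow, calculate_state_estimation and calculate_short_circuit. Below is a small sketch of row-based versus columnar output selection; it reuses the hypothetical model from the power-flow sketch above and calls calculate_power_flow only because the example grid has no sensors, which state estimation would require. String component names and the attribute names u_pu and u_angle are assumed to be accepted interchangeably with the corresponding enums.

# Row based output, restricted to the listed component types.
rows = model.calculate_power_flow(output_component_types=["node", "sym_load"])
print(rows["node"].dtype.names)  # one structured array per selected component type

# Columnar output: per component type, either an attribute list or None (row based).
columns = model.calculate_power_flow(
    output_component_types={
        "node": ["u_pu", "u_angle"],  # only these node attributes, each as a plain array
        "sym_load": None,             # full row based data for sym_load
    }
)
print(columns["node"]["u_pu"])  # 1D numpy array of per-unit node voltages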
+ @overload
+ def calculate_short_circuit(
+ self,
+ *,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: None = ...,
+ threading: int = ...,
+ output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str,
+ ) -> SingleRowBasedOutputDataset: ...
+ @overload
+ def calculate_short_circuit(
+ self,
+ *,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: None = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeFilterOptions = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str,
+ ) -> SingleColumnarOutputDataset: ...
+ @overload
+ def calculate_short_circuit(
+ self,
+ *,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: None = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeMappingDict = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str,
+ ) -> SingleOutputDataset: ...
+ @overload
+ def calculate_short_circuit(
+ self,
+ *,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: BatchDataset | list[BatchDataset] = ...,
+ threading: int = ...,
+ output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar] = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str,
+ ) -> DenseBatchRowBasedOutputDataset: ...
+ @overload
+ def calculate_short_circuit(
+ self,
+ *,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: BatchDataset | list[BatchDataset] = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeFilterOptions = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str,
+ ) -> DenseBatchColumnarOutputDataset: ...
+ @overload
+ def calculate_short_circuit(
+ self,
+ *,
+ calculation_method: CalculationMethod | str = ...,
+ update_data: BatchDataset | list[BatchDataset] = ...,
+ threading: int = ...,
+ output_component_types: ComponentAttributeMappingDict = ...,
+ continue_on_batch_error: bool = ...,
+ decode_error: bool = ...,
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str,
+ ) -> DenseBatchOutputDataset: ...
+ def calculate_short_circuit( # noqa: PLR0913
+ self,
+ *,
+ calculation_method: CalculationMethod | str = CalculationMethod.iec60909,
+ update_data: BatchDataset | list[BatchDataset] | None = None,
+ threading: int = -1,
+ output_component_types: ComponentAttributeMapping = None,
+ continue_on_batch_error: bool = False,
+ decode_error: bool = True,
+ short_circuit_voltage_scaling: ShortCircuitVoltageScaling | str = ShortCircuitVoltageScaling.maximum,
+ ) -> Dataset:
+ """
+ Calculate a short circuit once with the current model attributes.
+ Or calculate in batch with the given update dataset.
+
+ Args:
+ calculation_method (an enumeration or string): The calculation method to use; the iec60909 standard
+ is used by default.
+ update_data (dict, list of dict, optional):
+ None: Calculate a short circuit once with the current model attributes.
+
+ Or a dictionary for batch calculation with batch update.
+
+ - key: Component type name to be updated in batch.
+ - value:
+
+ - For homogeneous update batch (a 2D numpy structured array):
+
+ - Dimension 0: Each batch.
+ - Dimension 1: Each updated element per batch for this component type.
+ - For inhomogeneous update batch (a dictionary containing two keys):
+
+ - indptr: A 1D numpy int64 array with length n_batch + 1. Given batch number k, the
+ update array for this batch is data[indptr[k]:indptr[k + 1]]. This follows the concept of a
+ compressed sparse structure.
+ https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
+ - data: A flat 1D numpy structured array.
+ Or a list of such dictionaries (batch datasets) to represent multiple dimensions of a cartesian product.
+ The calculation core will interpret these datasets as a cartesian product of all the scenarios.
+ Each batch dataset in the list represents one dimension of the cartesian product.
+ The output will then have row size equal to the product of the batch sizes of all these datasets,
+ in 1D flat structure.
+ E.g., if you have three batch datasets with batch sizes 2, 3, and 4 respectively,
+ and the number of nodes is 5, the final output for nodes will have shape (2*3*4, 5).
+ threading (int, optional): Applicable only for batch calculation.
+
+ - < 0: Sequential
+ - = 0: Parallel, use number of hardware threads
+ - > 0: Specify number of parallel threads
+ output_component_types (ComponentAttributeMapping):
+
+ - None: Row based data for all component types.
+ - set[ComponentTypeVar] or list[ComponentTypeVar]: Row based data for the specified component types.
+ - ComponentAttributeFilterOptions: Columnar data for all component types.
+ - ComponentAttributeMappingDict:
+ key: ComponentType
+ value:
+ - None: Row based data for the specified component types.
+ - ComponentAttributeFilterOptions: Columnar data for the specified component types.
+ - set[str] | list[str]: Columnar data for the specified component types and attributes.
+ continue_on_batch_error (bool, optional):
+ Continue the program (instead of throwing an error) if some scenarios fail.
+ You can still retrieve the errors and succeeded/failed scenarios via the batch_error.
+ decode_error (bool, optional):
+ Decode error messages to their derived types if possible.
+ short_circuit_voltage_scaling ({ShortCircuitVoltageScaling, str}, optional):
+ Whether to use the maximum or minimum voltage scaling.
+ By default, the maximum voltage scaling is used to calculate the short circuit.
+ (An illustrative sketch follows this method.)
+
+ Returns:
+ Dictionary of results of all components.
+
+ - key: Component type name.
+ - value:
+
+ - For single calculation: 1D numpy structured array for the results of this component type.
+ - For batch calculation: 2D numpy structured array for the results of this component type.
+
+ - Dimension 0: Each batch.
+ - Dimension 1: The result of each element for this component type.
+
+ Raises:
+ Exception: In case an error in the core occurs, an exception will be thrown.
+ """
+ return self._calculate_short_circuit(
+ calculation_method=calculation_method,
+ update_data=update_data,
+ threading=threading,
+ output_component_types=output_component_types,
+ continue_on_batch_error=continue_on_batch_error,
+ decode_error=decode_error,
+ short_circuit_voltage_scaling=short_circuit_voltage_scaling,
+ )
+
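Below is a hedged sketch of the short_circuit_voltage_scaling option. It extends the hypothetical single-node example (the node, source and sym_load arrays from the power-flow sketch above) with one fault component included in the input data. The fault attribute names, the FaultType member and the i_f output attribute are assumptions based on the library's public enum and component documentation, not on this diff; verify them against the installed version.

from power_grid_model import FaultType, PowerGridModel, ShortCircuitVoltageScaling, initialize_array

# One three-phase fault on node 1 (example id only); the fault is part of the input data here.
fault = initialize_array("input", "fault", 1)
fault["id"] = [30]
fault["status"] = [1]
fault["fault_object"] = [1]
fault["fault_type"] = [FaultType.three_phase]

sc_model = PowerGridModel(
    input_data={"node": node, "source": source, "sym_load": sym_load, "fault": fault}
)

# Use minimum instead of the default maximum voltage scaling (the IEC 60909 voltage factor choice).
sc_result = sc_model.calculate_short_circuit(
    short_circuit_voltage_scaling=ShortCircuitVoltageScaling.minimum
)
print(sc_result["fault"]["i_f"])  # fault current(s) in A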
+ def __del__(self):
+ get_pgc().destroy_model(self._model_ptr)
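Finally, a hedged sketch of the threading and continue_on_batch_error options that all three calculate_* methods share, reusing the model and dense_update arrays from the first sketch. The batch_error attribute access shown is an assumption about the error-reporting API mentioned in the docstrings above and should be checked against the installed version.

# Run the dense batch in parallel using all hardware threads and keep going on failures.
batch_result = model.calculate_power_flow(
    update_data={"sym_load": dense_update},
    threading=0,                   # 0 = parallel, using the number of hardware threads
    continue_on_batch_error=True,  # failed scenarios are reported instead of raised
)
# Assumed error-reporting API: inspect which scenarios failed, if any.
if model.batch_error is not None:
    print(model.batch_error.failed_scenarios)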