power-grid-model 1.12.10__py3-none-musllinux_1_2_x86_64.whl → 1.12.112__py3-none-musllinux_1_2_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (37)
  1. power_grid_model/_core/buffer_handling.py +24 -4
  2. power_grid_model/_core/data_handling.py +65 -11
  3. power_grid_model/_core/data_types.py +21 -11
  4. power_grid_model/_core/dataset_definitions.py +5 -5
  5. power_grid_model/_core/error_handling.py +39 -30
  6. power_grid_model/_core/errors.py +4 -0
  7. power_grid_model/_core/options.py +12 -12
  8. power_grid_model/_core/power_grid_core.py +20 -6
  9. power_grid_model/_core/power_grid_dataset.py +37 -27
  10. power_grid_model/_core/power_grid_meta.py +37 -18
  11. power_grid_model/_core/power_grid_model.py +381 -42
  12. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/basics.h +4 -4
  13. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/dataset.h +16 -0
  14. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/dataset_definitions.h +8 -0
  15. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/handle.h +12 -0
  16. power_grid_model/_core/power_grid_model_c/include/power_grid_model_c/model.h +5 -0
  17. power_grid_model/_core/power_grid_model_c/include/power_grid_model_cpp/dataset.hpp +4 -0
  18. power_grid_model/_core/power_grid_model_c/lib/cmake/power_grid_model/power_grid_modelConfig.cmake +3 -3
  19. power_grid_model/_core/power_grid_model_c/lib/cmake/power_grid_model/power_grid_modelConfigVersion.cmake +3 -3
  20. power_grid_model/_core/power_grid_model_c/lib/cmake/power_grid_model/power_grid_modelTargets-release.cmake +3 -3
  21. power_grid_model/_core/power_grid_model_c/lib/cmake/power_grid_model/power_grid_modelTargets.cmake +1 -1
  22. power_grid_model/_core/power_grid_model_c/lib/libpower_grid_model_c.so +0 -0
  23. power_grid_model/_core/power_grid_model_c/lib/libpower_grid_model_c.so.1.12.112 +0 -0
  24. power_grid_model/_core/serialization.py +11 -9
  25. power_grid_model/_core/typing.py +2 -2
  26. power_grid_model/_core/utils.py +13 -8
  27. power_grid_model/utils.py +67 -7
  28. power_grid_model/validation/_rules.py +7 -11
  29. power_grid_model/validation/errors.py +1 -1
  30. power_grid_model/validation/utils.py +4 -5
  31. {power_grid_model-1.12.10.dist-info → power_grid_model-1.12.112.dist-info}/METADATA +4 -22
  32. {power_grid_model-1.12.10.dist-info → power_grid_model-1.12.112.dist-info}/RECORD +36 -35
  33. {power_grid_model-1.12.10.dist-info → power_grid_model-1.12.112.dist-info}/WHEEL +1 -1
  34. power_grid_model-1.12.112.dist-info/sboms/auditwheel.cdx.json +1 -0
  35. power_grid_model/_core/power_grid_model_c/lib/libpower_grid_model_c.so.1.12.10 +0 -0
  36. {power_grid_model-1.12.10.dist-info → power_grid_model-1.12.112.dist-info}/entry_points.txt +0 -0
  37. {power_grid_model-1.12.10.dist-info → power_grid_model-1.12.112.dist-info}/licenses/LICENSE +0 -0

power_grid_model/_core/buffer_handling.py
@@ -7,15 +7,19 @@ Power grid model buffer handler
 """
 
 from dataclasses import dataclass
-from typing import cast
+from typing import cast, overload
 
 import numpy as np
 
 from power_grid_model._core.data_types import (
     AttributeType,
     ComponentData,
+    DenseBatchArray,
+    DenseBatchColumnarData,
     DenseBatchData,
     IndexPointer,
+    SingleArray,
+    SingleColumnarData,
     SingleComponentData,
     SparseBatchArray,
     SparseBatchData,
@@ -88,6 +92,10 @@ def _get_raw_data_view(data: np.ndarray, dtype: np.dtype) -> VoidPtr:
     return np.ascontiguousarray(data, dtype=dtype).ctypes.data_as(VoidPtr)
 
 
+@overload
+def _get_raw_component_data_view(data: np.ndarray, schema: ComponentMetaData) -> VoidPtr: ...
+@overload
+def _get_raw_component_data_view(data: dict[AttributeType, np.ndarray], schema: ComponentMetaData) -> None: ...
 def _get_raw_component_data_view(
     data: np.ndarray | dict[AttributeType, np.ndarray], schema: ComponentMetaData
 ) -> VoidPtr | None:
@@ -200,9 +208,15 @@ def _get_dense_buffer_properties(
     n_total_elements = actual_batch_size * n_elements_per_scenario
 
     if is_batch is not None and is_batch != actual_is_batch:
-        raise ValueError(f"Provided 'is batch' is incorrect for the provided data. {VALIDATOR_MSG}")
+        raise ValueError(
+            f"Incorrect/inconsistent data provided: {'batch' if actual_is_batch else 'single'} "
+            f"data provided but {'batch' if is_batch else 'single'} data expected. {VALIDATOR_MSG}"
+        )
     if batch_size is not None and batch_size != actual_batch_size:
-        raise ValueError(f"Provided 'batch size' is incorrect for the provided data. {VALIDATOR_MSG}")
+        raise ValueError(
+            f"Incorrect/inconsistent batch size provided: {actual_batch_size} scenarios provided "
+            f"but {batch_size} scenarios expected. {VALIDATOR_MSG}"
+        )
 
     return BufferProperties(
         is_sparse=is_sparse_property,
@@ -480,7 +494,13 @@ def _create_sparse_buffer(properties: BufferProperties, schema: ComponentMetaDat
     return cast(SparseBatchData, {"data": data, "indptr": indptr})
 
 
-def _create_contents_buffer(shape, dtype, columns: list[AttributeType] | None) -> SingleComponentData | DenseBatchData:
+@overload
+def _create_contents_buffer(shape, dtype, columns: None) -> SingleArray | DenseBatchArray: ...
+@overload
+def _create_contents_buffer(
+    shape, dtype, columns: list[AttributeType]
+) -> SingleColumnarData | DenseBatchColumnarData: ...
+def _create_contents_buffer(shape, dtype, columns):
    if columns is None:
        return np.empty(shape=shape, dtype=dtype)
 
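Note: both `_get_raw_component_data_view` and `_create_contents_buffer` gain `typing.overload` stubs so that static type checkers can map each accepted input form to its specific result type, while a single untyped implementation keeps the runtime behaviour unchanged. A minimal sketch of the pattern (the function and types here are illustrative, not part of the package):

    from typing import overload

    @overload
    def to_buffer(columns: None) -> bytes: ...
    @overload
    def to_buffer(columns: list[str]) -> dict[str, bytes]: ...
    def to_buffer(columns):
        # only this untyped implementation exists at runtime; the @overload
        # stubs above are erased and serve the type checker only
        if columns is None:
            return b""
        return {column: b"" for column in columns}

A checker now infers `bytes` for `to_buffer(None)` and `dict[str, bytes]` for `to_buffer(["id"])`, instead of one union type for both calls.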

power_grid_model/_core/data_handling.py
@@ -6,16 +6,26 @@
 Data handling
 """
 
+from typing import Literal, overload
+
 import numpy as np
 
-from power_grid_model._core.data_types import Dataset, SingleDataset
-from power_grid_model._core.dataset_definitions import ComponentType, DatasetType
+from power_grid_model._core.data_types import (
+    BatchDataset,
+    Dataset,
+    DenseBatchArray,
+    SingleArray,
+    SingleColumnarData,
+    SingleDataset,
+)
+from power_grid_model._core.dataset_definitions import ComponentType, ComponentTypeVar, DatasetType
 from power_grid_model._core.enum import CalculationType, ComponentAttributeFilterOptions
 from power_grid_model._core.errors import PowerGridUnreachableHitError
 from power_grid_model._core.power_grid_dataset import CConstDataset, CMutableDataset
 from power_grid_model._core.power_grid_meta import initialize_array, power_grid_meta_data
-from power_grid_model._core.typing import ComponentAttributeMapping
+from power_grid_model._core.typing import ComponentAttributeMapping, ComponentAttributeMappingDict
 from power_grid_model._core.utils import process_data_filter
+from power_grid_model.data_types import DenseBatchColumnarData
 
 
 def get_output_type(*, calculation_type: CalculationType, symmetric: bool) -> DatasetType:
@@ -85,6 +95,54 @@ def prepare_output_view(output_data: Dataset, output_type: DatasetType) -> CMuta
     return CMutableDataset(output_data, dataset_type=output_type)
 
 
+@overload
+def create_output_data(
+    output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar],
+    output_type: DatasetType,
+    all_component_count: dict[ComponentType, int],
+    is_batch: Literal[False],
+    batch_size: int,
+) -> dict[ComponentType, SingleArray]: ...
+@overload
+def create_output_data(
+    output_component_types: None | set[ComponentTypeVar] | list[ComponentTypeVar],
+    output_type: DatasetType,
+    all_component_count: dict[ComponentType, int],
+    is_batch: Literal[True],
+    batch_size: int,
+) -> dict[ComponentType, DenseBatchArray]: ...
+@overload
+def create_output_data(
+    output_component_types: ComponentAttributeFilterOptions,
+    output_type: DatasetType,
+    all_component_count: dict[ComponentType, int],
+    is_batch: Literal[False],
+    batch_size: int,
+) -> dict[ComponentType, SingleColumnarData]: ...
+@overload
+def create_output_data(
+    output_component_types: ComponentAttributeFilterOptions,
+    output_type: DatasetType,
+    all_component_count: dict[ComponentType, int],
+    is_batch: Literal[True],
+    batch_size: int,
+) -> dict[ComponentType, DenseBatchColumnarData]: ...
+@overload
+def create_output_data(
+    output_component_types: ComponentAttributeMappingDict,
+    output_type: DatasetType,
+    all_component_count: dict[ComponentType, int],
+    is_batch: Literal[False],
+    batch_size: int,
+) -> SingleDataset: ...
+@overload
+def create_output_data(
+    output_component_types: ComponentAttributeMappingDict,
+    output_type: DatasetType,
+    all_component_count: dict[ComponentType, int],
+    is_batch: Literal[True],
+    batch_size: int,
+) -> BatchDataset: ...
 def create_output_data(
     output_component_types: ComponentAttributeMapping,
     output_type: DatasetType,
@@ -96,7 +154,7 @@ def create_output_data(
     Create the output dataset based on component and batch size from the model; and output attributes requested by user.
 
     Args:
-        output_component_types:
+        output_component_types (ComponentAttributeMapping):
             the output components the user seeks to extract
         output_type:
             the type of output that the user will see (as per the calculation options)
@@ -118,11 +176,7 @@ def create_output_data(
     result_dict: Dataset = {}
 
     for name, count in all_component_count.items():
-        # shape
-        if is_batch:
-            shape: tuple[int] | tuple[int, int] = (batch_size, count)
-        else:
-            shape = (count,)
+        shape: tuple[int, int] | int = (batch_size, count) if is_batch else count
 
         requested_component = processed_output_types[name]
         dtype = power_grid_meta_data[output_type][name].dtype
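The five-line `if`/`else` collapses into one conditional expression typed `tuple[int, int] | int`; this works because `numpy.empty` accepts either a plain integer (one dimension) or a tuple (one axis per entry) as its shape, for example:

    import numpy as np

    assert np.empty(3).shape == (3,)          # an int shape means a 1-D array
    assert np.empty((2, 3)).shape == (2, 3)   # a tuple gives one axis per entry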
@@ -134,8 +188,8 @@ def create_output_data(
             ComponentAttributeFilterOptions.everything,
             ComponentAttributeFilterOptions.relevant,
         ]:
-            result_dict[name] = {attr: np.empty(shape, dtype=dtype[attr]) for attr in dtype.names}
+            result_dict[name] = {attr: np.empty(shape=shape, dtype=dtype[attr]) for attr in dtype.names}
         elif isinstance(requested_component, list | set):
-            result_dict[name] = {attr: np.empty(shape, dtype=dtype[attr]) for attr in requested_component}
+            result_dict[name] = {attr: np.empty(shape=shape, dtype=dtype[attr]) for attr in requested_component}
 
     return result_dict
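Together with the `Literal[False]`/`Literal[True]` parameters in the `create_output_data` overloads above, a type checker can now pick single-scenario or batch return types from the arguments at the call site. The mechanism in isolation (illustrative names, not the package API):

    from typing import Literal, overload

    @overload
    def empty_shape(count: int, is_batch: Literal[False]) -> tuple[int]: ...
    @overload
    def empty_shape(count: int, is_batch: Literal[True]) -> tuple[int, int]: ...
    def empty_shape(count, is_batch):
        # the literal the caller passes selects which overload applies
        return (8, count) if is_batch else (count,)

    single = empty_shape(5, False)  # inferred as tuple[int]
    batch = empty_shape(5, True)    # inferred as tuple[int, int]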

power_grid_model/_core/data_types.py
@@ -9,35 +9,35 @@ Data types for library-internal use. In an attempt to clarify type hints, some t
 have been defined and explained in this file.
 """
 
-from typing import TypeAlias, TypedDict, TypeVar
+from typing import TypedDict, TypeVar
 
 import numpy as np
 
-from power_grid_model._core.dataset_definitions import ComponentTypeVar
+from power_grid_model._core.dataset_definitions import ComponentType, ComponentTypeVar
 
-SingleArray: TypeAlias = np.ndarray
+type SingleArray = np.ndarray
 
-AttributeType: TypeAlias = str
+type AttributeType = str
 
-SingleColumn: TypeAlias = np.ndarray
-
-DenseBatchArray: TypeAlias = np.ndarray
+type SingleColumn = np.ndarray
+type DenseBatchArray = np.ndarray
 
 SingleColumnarData = dict[AttributeType, SingleColumn]
 
 _SingleComponentData = TypeVar("_SingleComponentData", SingleArray, SingleColumnarData)  # deduction helper
 SingleComponentData = SingleArray | SingleColumnarData
 
-
+SingleRowBasedDataset = dict[ComponentTypeVar, SingleArray]
+SingleColumnarDataset = dict[ComponentTypeVar, SingleColumnarData]
 SingleDataset = dict[ComponentTypeVar, _SingleComponentData]
 
 BatchList = list[SingleDataset]
 
-BatchColumn: TypeAlias = np.ndarray
+type BatchColumn = np.ndarray
 
 DenseBatchColumnarData = dict[AttributeType, BatchColumn]
 
-IndexPointer: TypeAlias = np.ndarray
+type IndexPointer = np.ndarray
 
 
 class SparseBatchColumnarData(TypedDict):
@@ -86,7 +86,7 @@ class SparseBatchArray(TypedDict):
 
 SparseBatchData = SparseBatchArray | SparseBatchColumnarData
 
-SparseDataComponentType: TypeAlias = str
+type SparseDataComponentType = str
 
 BatchColumnarData = DenseBatchColumnarData | SparseBatchColumnarData
 
@@ -113,6 +113,16 @@ Dataset = dict[ComponentTypeVar, _ComponentData]
 
 DenseBatchData = DenseBatchArray | DenseBatchColumnarData
 
+# overloads that only match on latest PGM type
+SingleRowBasedOutputDataset = dict[ComponentType, SingleArray]
+SingleColumnarOutputDataset = dict[ComponentType, SingleColumnarData]
+SingleOutputDataset = dict[ComponentType, SingleComponentData]
+DenseBatchRowBasedOutputDataset = dict[ComponentType, DenseBatchArray]
+DenseBatchColumnarOutputDataset = dict[ComponentType, DenseBatchColumnarData]
+DenseBatchOutputDataset = dict[ComponentType, DenseBatchData]
+OutputDataset = dict[ComponentType, ComponentData]
+
+
 NominalValue = int
 
 RealValue = float
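The `Name: TypeAlias = ...` annotations are migrated to the `type` statement from PEP 695, available since Python 3.12 and consistent with the PEP 695 generics used elsewhere in this release. A `type` statement builds a `typing.TypeAliasType` whose value is evaluated lazily; a small illustration (not the package's code):

    import numpy as np

    type SingleArray = np.ndarray      # PEP 695 alias, Python 3.12+

    print(type(SingleArray))           # <class 'typing.TypeAliasType'>
    print(SingleArray.__value__)       # the aliased value, resolved on first access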

power_grid_model/_core/dataset_definitions.py
@@ -8,7 +8,7 @@
 
 from collections.abc import Mapping
 from enum import Enum, EnumMeta
-from typing import Any, TypeAlias, TypeVar
+from typing import TypeVar
 
 # fmt: off
 
@@ -76,10 +76,10 @@ class ComponentType(str, Enum, metaclass=_MetaEnum):
     fault = "fault"
 
 
-DatasetTypeLike: TypeAlias = DatasetType | str
+type DatasetTypeLike = DatasetType | str
 DatasetTypeVar = TypeVar("DatasetTypeVar", bound=DatasetTypeLike)  # helper used for type deduction
 
-ComponentTypeLike: TypeAlias = ComponentType | str
+type ComponentTypeLike = ComponentType | str
 ComponentTypeVar = TypeVar("ComponentTypeVar", bound=ComponentTypeLike)  # helper used for type deduction
 
 
@@ -90,7 +90,7 @@ def _str_to_datatype(data_type: DatasetTypeLike) -> DatasetType:
     return DatasetType[data_type]
 
 
-def _map_to_datatypes(data: Mapping[DatasetTypeVar, Any]) -> dict[DatasetType, Any]:
+def _map_to_datatypes[K: DatasetTypeLike, V](data: Mapping[K, V]) -> dict[DatasetType, V]:
     """Helper function to map datatype str keys to DatasetType."""
     return {_str_to_datatype(key): value for key, value in data.items()}
 
@@ -102,7 +102,7 @@ def _str_to_component_type(component: ComponentTypeLike) -> ComponentType:
     return ComponentType[component]
 
 
-def _map_to_component_types(data: Mapping[ComponentTypeVar, Any]) -> dict[ComponentType, Any]:
+def _map_to_component_types[K: ComponentTypeLike, V](data: Mapping[K, V]) -> dict[ComponentType, V]:
     """Helper function to map componenttype str keys to ComponentType."""
     return {_str_to_component_type(key): value for key, value in data.items()}
 
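Both `_map_to_*` helpers switch from a module-level `TypeVar` with an `Any` value type to PEP 695 inline type parameters, so the mapping's value type now flows through to the result instead of degrading to `Any`. The shape of the change, on a toy function (illustrative, requires Python 3.12+):

    from collections.abc import Mapping

    def upper_keys[K: str, V](data: Mapping[K, V]) -> dict[str, V]:
        # K is bounded by str; V is preserved in the return type
        return {key.upper(): value for key, value in data.items()}

    print(upper_keys({"a": 1}))  # {'A': 1}, inferred as dict[str, int]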

power_grid_model/_core/error_handling.py
@@ -7,6 +7,7 @@ Error handling
 """
 
 import re
+from enum import IntEnum
 
 import numpy as np
 
@@ -33,6 +34,7 @@ from power_grid_model._core.errors import (
     PowerGridBatchError,
     PowerGridDatasetError,
     PowerGridError,
+    PowerGridIllegalOperationError,
     PowerGridNotImplementedError,
     PowerGridSerializationError,
     PowerGridUnreachableHitError,
@@ -40,14 +42,17 @@ from power_grid_model._core.errors import (
     TapSearchStrategyIncompatibleError,
 )
 from power_grid_model._core.index_integer import IdxNp
-from power_grid_model._core.power_grid_core import power_grid_core as pgc
+from power_grid_model._core.power_grid_core import get_power_grid_core as get_pgc
 
 VALIDATOR_MSG = "\nTry validate_input_data() or validate_batch_data() to validate your data.\n"
-# error codes
-PGM_NO_ERROR = 0
-PGM_REGULAR_ERROR = 1
-PGM_BATCH_ERROR = 2
-PGM_SERIALIZATION_ERROR = 3
+
+
+class _PgmCErrorCode(IntEnum):
+    NO_ERROR = 0
+    REGULAR_ERROR = 1
+    BATCH_ERROR = 2
+    SERIALIZATION_ERROR = 3
+
 
 _MISSING_CASE_FOR_ENUM_RE = re.compile(r" is not implemented for (.+) #(-?\d+)!\n")
 _INVALID_ARGUMENTS_RE = re.compile(r" is not implemented for ")  # multiple different flavors
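Folding the loose `PGM_*` constants into an `IntEnum` keeps them interchangeable with the raw integers the C API returns, because `IntEnum` members compare equal to plain ints. A quick illustration (the enum here is a stand-in, not the package's class):

    from enum import IntEnum

    class ErrorCode(IntEnum):
        NO_ERROR = 0
        REGULAR_ERROR = 1

    raw = 1                                            # as a C binding would return it
    assert raw == ErrorCode.REGULAR_ERROR              # members equal plain ints
    assert ErrorCode(raw) is ErrorCode.REGULAR_ERROR   # and round-trip cleanly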
@@ -82,6 +87,7 @@ _INVALID_CALCULATION_METHOD_RE = re.compile(r"The calculation method is invalid
 _INVALID_SHORT_CIRCUIT_PHASE_OR_TYPE_RE = re.compile(r"short circuit type")  # multiple different flavors
 _TAP_STRATEGY_SEARCH_OPT_INCMPT_RE = re.compile(r"Search method is incompatible with optimization strategy: ")
 _POWER_GRID_DATASET_ERROR_RE = re.compile(r"Dataset error: ")  # multiple different flavors
+_POWER_GRID_ILLEGAL_OPERATION_ERROR_RE = re.compile(r"Illegal operation: ")  # multiple different flavors
 _POWER_GRID_UNREACHABLE_HIT_RE = re.compile(r"Unreachable code hit when executing ")  # multiple different flavors
 _POWER_GRID_NOT_IMPLEMENTED_ERROR_RE = re.compile(r"The functionality is either not supported or not yet implemented!")
 
@@ -108,6 +114,7 @@ _ERROR_MESSAGE_PATTERNS = {
     _INVALID_SHORT_CIRCUIT_PHASE_OR_TYPE_RE: InvalidShortCircuitPhaseOrType,
     _TAP_STRATEGY_SEARCH_OPT_INCMPT_RE: TapSearchStrategyIncompatibleError,
     _POWER_GRID_DATASET_ERROR_RE: PowerGridDatasetError,
+    _POWER_GRID_ILLEGAL_OPERATION_ERROR_RE: PowerGridIllegalOperationError,
     _POWER_GRID_UNREACHABLE_HIT_RE: PowerGridUnreachableHitError,
     _POWER_GRID_NOT_IMPLEMENTED_ERROR_RE: PowerGridNotImplementedError,
 }
@@ -133,30 +140,32 @@ def find_error(batch_size: int = 1, decode_error: bool = True) -> RuntimeError |
     Returns: error object, can be none
 
     """
-    error_code: int = pgc.error_code()
-    if error_code == PGM_NO_ERROR:
-        return None
-    if error_code == PGM_REGULAR_ERROR:
-        error_message = pgc.error_message()
-        error_message += VALIDATOR_MSG
-        return _interpret_error(error_message, decode_error=decode_error)
-    if error_code == PGM_BATCH_ERROR:
-        error_message = "There are errors in the batch calculation." + VALIDATOR_MSG
-        error = PowerGridBatchError(error_message)
-        n_fails = pgc.n_failed_scenarios()
-        failed_idxptr = pgc.failed_scenarios()
-        failed_msgptr = pgc.batch_errors()
-        error.failed_scenarios = np.ctypeslib.as_array(failed_idxptr, shape=(n_fails,)).copy()
-        error.error_messages = [failed_msgptr[i].decode() for i in range(n_fails)]  # type: ignore
-        error.errors = [_interpret_error(message, decode_error=decode_error) for message in error.error_messages]
-        all_scenarios = np.arange(batch_size, dtype=IdxNp)
-        mask = np.ones(batch_size, dtype=np.bool_)
-        mask[error.failed_scenarios] = False
-        error.succeeded_scenarios = all_scenarios[mask]
-        return error
-    if error_code == PGM_SERIALIZATION_ERROR:
-        return PowerGridSerializationError(pgc.error_message())
-    return RuntimeError("Unknown error!")
+    error_code: int = get_pgc().error_code()
+    match error_code:
+        case _PgmCErrorCode.NO_ERROR:
+            return None
+        case _PgmCErrorCode.REGULAR_ERROR:
+            error_message = get_pgc().error_message()
+            error_message += VALIDATOR_MSG
+            return _interpret_error(error_message, decode_error=decode_error)
+        case _PgmCErrorCode.BATCH_ERROR:
+            error_message = "There are errors in the batch calculation." + VALIDATOR_MSG
+            error = PowerGridBatchError(error_message)
+            n_fails = get_pgc().n_failed_scenarios()
+            failed_idxptr = get_pgc().failed_scenarios()
+            failed_msgptr = get_pgc().batch_errors()
+            error.failed_scenarios = np.ctypeslib.as_array(failed_idxptr, shape=(n_fails,)).copy()
+            error.error_messages = [failed_msgptr[i].decode() for i in range(n_fails)]  # type: ignore
+            error.errors = [_interpret_error(message, decode_error=decode_error) for message in error.error_messages]
+            all_scenarios = np.arange(batch_size, dtype=IdxNp)
+            mask = np.ones(batch_size, dtype=np.bool_)
+            mask[error.failed_scenarios] = False
+            error.succeeded_scenarios = all_scenarios[mask]
+            return error
+        case _PgmCErrorCode.SERIALIZATION_ERROR:
+            return PowerGridSerializationError(get_pgc().error_message())
+        case _:
+            return RuntimeError("Unknown error!")
 
 
 def assert_no_error(batch_size: int = 1, decode_error: bool = True):
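The `if`/`elif` chain becomes a `match` statement. The `case _PgmCErrorCode.NO_ERROR:` patterns work as intended because they are dotted names: dotted names are value patterns compared with `==`, whereas a bare name in a `case` would be a capture pattern that matches anything. A compact illustration (names illustrative):

    from enum import IntEnum

    class Code(IntEnum):
        OK = 0
        FAIL = 1

    def describe(code: int) -> str:
        match code:
            case Code.OK:    # dotted name: value pattern, compared with ==
                return "ok"
            case Code.FAIL:
                return "fail"
            case _:          # wildcard replaces the old trailing fallback
                return "unknown"

    print(describe(0))  # "ok": a plain int matches because IntEnum compares equal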

power_grid_model/_core/errors.py
@@ -124,6 +124,10 @@ class PowerGridNotImplementedError(PowerGridError):
     """The functionality is either not supported or not yet implemented."""
 
 
+class PowerGridIllegalOperationError(PowerGridError):
+    """An illegal operation was attempted to the C API, such as accessing a null pointer."""
+
+
 class PowerGridUnreachableHitError(PowerGridError):
     """Supposedly unreachable code was hit.
 

power_grid_model/_core/options.py
@@ -9,7 +9,7 @@ Option class
 from collections.abc import Callable
 from typing import Any
 
-from power_grid_model._core.power_grid_core import OptionsPtr, power_grid_core as pgc
+from power_grid_model._core.power_grid_core import OptionsPtr, get_power_grid_core as get_pgc
 
 
 class OptionSetter:
@@ -36,15 +36,15 @@ class Options:
 
     _opt: OptionsPtr
     # option setter
-    calculation_type = OptionSetter(pgc.set_calculation_type)
-    calculation_method = OptionSetter(pgc.set_calculation_method)
-    symmetric = OptionSetter(pgc.set_symmetric)
-    error_tolerance = OptionSetter(pgc.set_err_tol)
-    max_iterations = OptionSetter(pgc.set_max_iter)
-    threading = OptionSetter(pgc.set_threading)
-    tap_changing_strategy = OptionSetter(pgc.set_tap_changing_strategy)
-    short_circuit_voltage_scaling = OptionSetter(pgc.set_short_circuit_voltage_scaling)
-    experimental_features = OptionSetter(pgc.set_experimental_features)
+    calculation_type = OptionSetter(get_pgc().set_calculation_type)
+    calculation_method = OptionSetter(get_pgc().set_calculation_method)
+    symmetric = OptionSetter(get_pgc().set_symmetric)
+    error_tolerance = OptionSetter(get_pgc().set_err_tol)
+    max_iterations = OptionSetter(get_pgc().set_max_iter)
+    threading = OptionSetter(get_pgc().set_threading)
+    tap_changing_strategy = OptionSetter(get_pgc().set_tap_changing_strategy)
+    short_circuit_voltage_scaling = OptionSetter(get_pgc().set_short_circuit_voltage_scaling)
+    experimental_features = OptionSetter(get_pgc().set_experimental_features)
 
     @property
     def opt(self) -> OptionsPtr:
@@ -57,11 +57,11 @@
 
     def __new__(cls, *args, **kwargs):
         instance = super().__new__(cls, *args, **kwargs)
-        instance._opt = pgc.create_options()
+        instance._opt = get_pgc().create_options()
         return instance
 
     def __del__(self):
-        pgc.destroy_options(self._opt)
+        get_pgc().destroy_options(self._opt)
 
     # not copyable
     def __copy__(self):

power_grid_model/_core/power_grid_core.py
@@ -6,6 +6,7 @@
 Loader for the dynamic library
 """
 
+import threading
 from collections.abc import Callable
 from ctypes import CDLL, POINTER, c_char, c_char_p, c_double, c_size_t, c_void_p
 from inspect import signature
@@ -14,6 +15,10 @@ from itertools import chain
 from power_grid_model._core.index_integer import IdC, IdxC
 from power_grid_model._core.power_grid_model_c.get_pgm_dll_path import get_pgm_dll_path
 
+# threading local
+_thread_local_data = threading.local()
+
+
 # integer index
 IdxPtr = POINTER(IdxC)
 """Pointer to index."""
@@ -195,14 +200,12 @@ class PowerGridCore:
     """
 
     _handle: HandlePtr
-    _instance: "PowerGridCore | None" = None
 
     # singleton of power grid core
     def __new__(cls, *args, **kwargs):
-        if cls._instance is None:
-            cls._instance = super().__new__(cls, *args, **kwargs)
-            cls._instance._handle = _CDLL.PGM_create_handle()
-        return cls._instance
+        instance = super().__new__(cls, *args, **kwargs)
+        instance._handle = _CDLL.PGM_create_handle()
+        return instance
 
     def __del__(self):
         _CDLL.PGM_destroy_handle(self._handle)
@@ -481,6 +484,12 @@
     def dataset_const_get_info(self, dataset: ConstDatasetPtr) -> DatasetInfoPtr:  # type: ignore[empty-body]
         pass  # pragma: no cover
 
+    @make_c_binding
+    def dataset_const_set_next_cartesian_product_dimension(
+        self, dataset: ConstDatasetPtr, next_dataset: ConstDatasetPtr
+    ) -> None:  # type: ignore[empty-body]
+        pass  # pragma: no cover
+
     @make_c_binding
     def dataset_writable_get_info(self, dataset: WritableDatasetPtr) -> DatasetInfoPtr:  # type: ignore[empty-body]
         pass  # pragma: no cover
@@ -560,4 +569,9 @@
 
 
 # make one instance
-power_grid_core = PowerGridCore()
+def get_power_grid_core() -> PowerGridCore:
+    try:
+        return _thread_local_data.power_grid_core
+    except AttributeError:
+        _thread_local_data.power_grid_core = PowerGridCore()
+        return _thread_local_data.power_grid_core
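
This replaces the process-wide `power_grid_core` singleton (removed above in `PowerGridCore.__new__`) with one lazily constructed instance, and thus one C handle, per thread. The underlying `threading.local` pattern in isolation (a sketch; `object()` stands in for `PowerGridCore()`):

    import threading

    _local = threading.local()

    def get_resource() -> object:
        # each thread sees its own attribute namespace on _local
        try:
            return _local.resource
        except AttributeError:
            _local.resource = object()  # stand-in for PowerGridCore()
            return _local.resource

    first = get_resource()
    assert get_resource() is first          # cached within the same thread

    def worker() -> None:
        assert get_resource() is not first  # another thread builds its own

    t = threading.Thread(target=worker)
    t.start()
    t.join()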