pyconvexity 0.4.0__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry.
- pyconvexity/__init__.py +87 -46
- pyconvexity/_version.py +1 -1
- pyconvexity/core/__init__.py +3 -5
- pyconvexity/core/database.py +111 -103
- pyconvexity/core/errors.py +16 -10
- pyconvexity/core/types.py +61 -54
- pyconvexity/data/__init__.py +0 -1
- pyconvexity/data/loaders/cache.py +65 -64
- pyconvexity/data/schema/01_core_schema.sql +134 -234
- pyconvexity/data/schema/02_data_metadata.sql +38 -168
- pyconvexity/data/schema/03_validation_data.sql +327 -264
- pyconvexity/data/sources/gem.py +169 -139
- pyconvexity/io/__init__.py +4 -10
- pyconvexity/io/excel_exporter.py +694 -480
- pyconvexity/io/excel_importer.py +817 -545
- pyconvexity/io/netcdf_exporter.py +66 -61
- pyconvexity/io/netcdf_importer.py +850 -619
- pyconvexity/models/__init__.py +109 -59
- pyconvexity/models/attributes.py +197 -178
- pyconvexity/models/carriers.py +70 -67
- pyconvexity/models/components.py +260 -236
- pyconvexity/models/network.py +202 -284
- pyconvexity/models/results.py +65 -55
- pyconvexity/models/scenarios.py +58 -88
- pyconvexity/solvers/__init__.py +5 -5
- pyconvexity/solvers/pypsa/__init__.py +3 -3
- pyconvexity/solvers/pypsa/api.py +150 -134
- pyconvexity/solvers/pypsa/batch_loader.py +165 -162
- pyconvexity/solvers/pypsa/builder.py +390 -291
- pyconvexity/solvers/pypsa/constraints.py +184 -162
- pyconvexity/solvers/pypsa/solver.py +968 -663
- pyconvexity/solvers/pypsa/storage.py +1377 -671
- pyconvexity/timeseries.py +63 -60
- pyconvexity/validation/__init__.py +14 -6
- pyconvexity/validation/rules.py +95 -84
- pyconvexity-0.4.1.dist-info/METADATA +46 -0
- pyconvexity-0.4.1.dist-info/RECORD +42 -0
- pyconvexity/data/schema/04_scenario_schema.sql +0 -122
- pyconvexity/data/schema/migrate_add_geometries.sql +0 -73
- pyconvexity-0.4.0.dist-info/METADATA +0 -138
- pyconvexity-0.4.0.dist-info/RECORD +0 -44
- {pyconvexity-0.4.0.dist-info → pyconvexity-0.4.1.dist-info}/WHEEL +0 -0
- {pyconvexity-0.4.0.dist-info → pyconvexity-0.4.1.dist-info}/top_level.txt +0 -0
pyconvexity/solvers/pypsa/api.py
CHANGED
@@ -18,7 +18,6 @@ logger = logging.getLogger(__name__)
 
 def solve_network(
     db_path: str,
-    network_id: int,
     scenario_id: Optional[int] = None,
     solver_name: str = "highs",
     solver_options: Optional[Dict[str, Any]] = None,
@@ -26,19 +25,19 @@ def solve_network(
     discount_rate: Optional[float] = None,
     progress_callback: Optional[Callable[[int, str], None]] = None,
     return_detailed_results: bool = True,
-    custom_solver_config: Optional[Dict[str, Any]] = None
+    custom_solver_config: Optional[Dict[str, Any]] = None,
+    include_unmet_loads: bool = True,
 ) -> Dict[str, Any]:
     """
-    Complete solve workflow: build PyPSA network from database, solve, store results.
-
+    Complete solve workflow: build PyPSA network from database, solve, store results (single network per database).
+
     This is the main high-level function that most users should use. It handles
     the complete workflow of loading data from database, building a PyPSA network,
     solving it, and storing results back to the database.
-
+
     Args:
         db_path: Path to the database file
-
-        scenario_id: Optional scenario ID (uses master scenario if None)
+        scenario_id: Optional scenario ID (NULL for base network)
         solver_name: Solver to use (default: "highs"). Use "custom" for custom_solver_config.
         solver_options: Optional solver-specific options
         constraints_dsl: Optional DSL constraints to apply
@@ -48,10 +47,11 @@ def solve_network(
         custom_solver_config: Optional custom solver configuration when solver_name="custom"
             Format: {"solver": "actual_solver_name", "solver_options": {...}}
             Example: {"solver": "gurobi", "solver_options": {"Method": 2, "Crossover": 0}}
-
+        include_unmet_loads: Whether to include unmet load components in the network (default: True)
+
     Returns:
         Dictionary with solve results - comprehensive if return_detailed_results=True, simple status otherwise
-
+
     Raises:
         DatabaseError: If database operations fail
         ValidationError: If network data is invalid
@@ -59,137 +59,157 @@ def solve_network(
     """
     if progress_callback:
         progress_callback(0, "Starting network solve...")
-
+
     with database_context(db_path) as conn:
-        # Load network configuration with scenario awareness
+        # Load network configuration with scenario awareness
         from pyconvexity.models import get_network_config
-
+
+        network_config = get_network_config(conn, scenario_id)
         if progress_callback:
             progress_callback(8, "Loaded network configuration")
-
+
         # Use configuration values with parameter overrides
         # Note: network_config already has default of 0.0 from get_network_config()
-        effective_discount_rate =
-
-
+        effective_discount_rate = (
+            discount_rate
+            if discount_rate is not None
+            else network_config.get("discount_rate")
+        )
+        logger.info(
+            f"Using discount rate: {effective_discount_rate} (from {'parameter override' if discount_rate is not None else 'network config'})"
+        )
+
         # Build network
         if progress_callback:
             progress_callback(10, "Building PyPSA network...")
-
+
         builder = NetworkBuilder()
-        network = builder.build_network(
-
+        network = builder.build_network(
+            conn, scenario_id, progress_callback, include_unmet_loads
+        )
+
         if progress_callback:
-            progress_callback(
-
+            progress_callback(
+                50,
+                f"Network built: {len(network.buses)} buses, {len(network.generators)} generators",
+            )
+
         # Create constraint applicator and apply constraints BEFORE solve
         constraint_applicator = ConstraintApplicator()
-
+
         # Apply constraints before solving (network modifications like GlobalConstraints)
         if progress_callback:
             progress_callback(60, "Applying constraints...")
-
+
         constraint_applicator.apply_constraints(
-            conn,
+            conn, network, scenario_id, constraints_dsl
         )
-
+
         # Solve network
         if progress_callback:
             progress_callback(70, f"Solving with {solver_name}...")
-
+
         solver = NetworkSolver()
         solve_result = solver.solve_network(
-            network,
+            network,
             solver_name=solver_name,
             solver_options=solver_options,
            discount_rate=effective_discount_rate,  # Use effective discount rate from config
             conn=conn,
-            network_id=network_id,
             scenario_id=scenario_id,
             constraint_applicator=constraint_applicator,
-            custom_solver_config=custom_solver_config
+            custom_solver_config=custom_solver_config,
         )
-
+
         if progress_callback:
             progress_callback(85, "Storing results...")
-
+
         # Store results - ALWAYS store results regardless of return_detailed_results flag
         storage = ResultStorage()
-        storage_result = storage.store_results(
-
-        )
-
+        storage_result = storage.store_results(conn, network, solve_result, scenario_id)
+
         if progress_callback:
             progress_callback(95, "Solve completed successfully")
-
+
         # Optimize database after successful solve (if solve was successful)
         if solve_result.get("success", False):
             try:
                 if progress_callback:
                     progress_callback(98, "Optimizing database...")
-
-                from pyconvexity.core.database import
-
+
+                from pyconvexity.core.database import (
+                    should_optimize_database,
+                    optimize_database,
+                )
+
                 # Only optimize if there's significant free space (>5% threshold for post-solve)
                 if should_optimize_database(conn, free_space_threshold_percent=5.0):
                     logger.info("Running database optimization after successful solve")
                     optimization_result = optimize_database(conn)
-                    logger.info(
+                    logger.info(
+                        f"Database optimization completed: {optimization_result['space_reclaimed']:,} bytes reclaimed"
+                    )
                 else:
-                    logger.debug(
-
+                    logger.debug(
+                        "Skipping database optimization - insufficient free space"
+                    )
+
             except Exception as e:
                 # Don't fail the solve if optimization fails
                 logger.warning(f"Database optimization failed (non-critical): {e}")
-
+
         if progress_callback:
             progress_callback(100, "Complete")
-
+
         # Return simple status if requested (for sidecar/async usage)
         # Results are now stored in database regardless of this flag
         if not return_detailed_results:
             return {
                 "success": solve_result.get("success", False),
-                "message":
-
-
-
+                "message": (
+                    "Solve completed successfully"
+                    if solve_result.get("success")
+                    else "Solve failed"
+                ),
+                "error": (
+                    solve_result.get("error")
+                    if not solve_result.get("success")
+                    else None
+                ),
+                "scenario_id": scenario_id,
             }
-
+
         # Combine results in comprehensive format for detailed analysis
         comprehensive_result = {
             **solve_result,
             "storage_stats": storage_result,
-            "
-            "scenario_id": scenario_id
+            "scenario_id": scenario_id,
         }
-
+
         # Transform to include sidecar-compatible format
         return _transform_to_comprehensive_format(comprehensive_result)
 
 
 def build_pypsa_network(
     db_path: str,
-    network_id: int,
     scenario_id: Optional[int] = None,
-    progress_callback: Optional[Callable[[int, str], None]] = None
-) ->
+    progress_callback: Optional[Callable[[int, str], None]] = None,
+) -> "pypsa.Network":
     """
-    Build PyPSA network object from database.
-
+    Build PyPSA network object from database (single network per database).
+
     This function loads all network data from the database and constructs
     a PyPSA Network object ready for solving or analysis. Useful when you
     want to inspect or modify the network before solving.
-
+
     Args:
         db_path: Path to the database file
-
-        scenario_id: Optional scenario ID (uses master scenario if None)
+        scenario_id: Optional scenario ID (NULL for base network)
         progress_callback: Optional callback for progress updates
-
+
     Returns:
         PyPSA Network object ready for solving
-
+
     Raises:
         DatabaseError: If database operations fail
         ValidationError: If network data is invalid
@@ -197,32 +217,30 @@ def build_pypsa_network(
     """
     with database_context(db_path) as conn:
         builder = NetworkBuilder()
-        return builder.build_network(conn,
+        return builder.build_network(conn, scenario_id, progress_callback)
 
 
 def solve_pypsa_network(
-    network:
+    network: "pypsa.Network",
     db_path: str,
-    network_id: int,
     scenario_id: Optional[int] = None,
     solver_name: str = "highs",
     solver_options: Optional[Dict[str, Any]] = None,
     discount_rate: Optional[float] = None,
     store_results: bool = True,
     progress_callback: Optional[Callable[[int, str], None]] = None,
-    custom_solver_config: Optional[Dict[str, Any]] = None
+    custom_solver_config: Optional[Dict[str, Any]] = None,
 ) -> Dict[str, Any]:
     """
-    Solve PyPSA network and optionally store results back to database.
-
+    Solve PyPSA network and optionally store results back to database (single network per database).
+
     This function takes an existing PyPSA network (e.g., from build_pypsa_network),
     solves it, and optionally stores the results back to the database.
-
+
     Args:
         network: PyPSA Network object to solve
         db_path: Path to the database file (needed for result storage)
-
-        scenario_id: Optional scenario ID
+        scenario_id: Optional scenario ID (NULL for base network)
         solver_name: Solver to use (default: "highs"). Use "custom" for custom_solver_config.
         solver_options: Optional solver-specific options
         discount_rate: Optional discount rate for multi-period optimization (default: 0.0)
@@ -230,17 +248,17 @@ def solve_pypsa_network(
         progress_callback: Optional callback for progress updates
         custom_solver_config: Optional custom solver configuration when solver_name="custom"
             Format: {"solver": "actual_solver_name", "solver_options": {...}}
-
+
     Returns:
         Dictionary with solve results and statistics
-
+
     Raises:
         DatabaseError: If database operations fail (when store_results=True)
         ImportError: If PyPSA is not available
     """
     if progress_callback:
         progress_callback(0, f"Solving network with {solver_name}...")
-
+
     # Solve network
     solver = NetworkSolver()
     solve_result = solver.solve_network(
@@ -248,77 +266,72 @@ def solve_pypsa_network(
         solver_name=solver_name,
         solver_options=solver_options,
         discount_rate=discount_rate,
-        custom_solver_config=custom_solver_config
+        custom_solver_config=custom_solver_config,
     )
-
+
     if progress_callback:
         progress_callback(70, "Solve completed")
-
+
     # Store results if requested
     if store_results:
         if progress_callback:
             progress_callback(80, "Storing results...")
-
+
         with database_context(db_path) as conn:
             storage = ResultStorage()
             storage_result = storage.store_results(
-                conn,
+                conn, network, solve_result, scenario_id
             )
             solve_result["storage_stats"] = storage_result
-
+
     if progress_callback:
         progress_callback(100, "Complete")
-
+
     return solve_result
 
 
 def load_network_components(
-    db_path: str,
-    network_id: int,
-    scenario_id: Optional[int] = None
+    db_path: str, scenario_id: Optional[int] = None
 ) -> Dict[str, Any]:
     """
-    Load all network components and attributes as structured data.
-
+    Load all network components and attributes as structured data (single network per database).
+
     This low-level function loads network data without building a PyPSA network.
     Useful for analysis, validation, or building custom network representations.
-
+
     Args:
         db_path: Path to the database file
-
-
-
+        scenario_id: Optional scenario ID (NULL for base network)
+
     Returns:
         Dictionary containing all network components and metadata
-
+
     Raises:
         DatabaseError: If database operations fail
     """
     with database_context(db_path) as conn:
         builder = NetworkBuilder()
-        return builder.load_network_data(conn,
+        return builder.load_network_data(conn, scenario_id)
 
 
 def apply_constraints(
-    network:
+    network: "pypsa.Network",
     db_path: str,
-    network_id: int,
     scenario_id: Optional[int] = None,
-    constraints_dsl: Optional[str] = None
+    constraints_dsl: Optional[str] = None,
 ) -> None:
     """
-    Apply custom constraints to PyPSA network.
-
+    Apply custom constraints to PyPSA network (single network per database).
+
     This function applies database-stored constraints and optional DSL constraints
     to an existing PyPSA network. Modifies the network in-place.
-
+
     Args:
         network: PyPSA Network object to modify
         db_path: Path to the database file
-
-        scenario_id: Optional scenario ID
+        scenario_id: Optional scenario ID (NULL for base network)
         constraints_dsl: Optional DSL constraints string
-
+
     Raises:
         DatabaseError: If database operations fail
         ValidationError: If constraints are invalid
@@ -326,55 +339,53 @@ def apply_constraints(
     with database_context(db_path) as conn:
         constraint_applicator = ConstraintApplicator()
         constraint_applicator.apply_constraints(
-            conn,
+            conn, network, scenario_id, constraints_dsl
        )
 
 
 def store_solve_results(
-    network:
+    network: "pypsa.Network",
     db_path: str,
-    network_id: int,
     scenario_id: Optional[int],
-    solve_metadata: Dict[str, Any]
+    solve_metadata: Dict[str, Any],
 ) -> Dict[str, Any]:
     """
-    Store PyPSA solve results back to database.
-
+    Store PyPSA solve results back to database (single network per database).
+
     This low-level function stores solve results from a PyPSA network back
     to the database. Useful when you want full control over the solving process
     but still want to store results in the standard format.
-
+
     Args:
         network: Solved PyPSA Network object
         db_path: Path to the database file
-
-        scenario_id: Scenario ID for result storage
+        scenario_id: Scenario ID for result storage (NULL for base network)
         solve_metadata: Dictionary with solve metadata (solver_name, solve_time, etc.)
-
+
     Returns:
         Dictionary with storage statistics
-
+
     Raises:
         DatabaseError: If database operations fail
     """
     with database_context(db_path) as conn:
         storage = ResultStorage()
-        return storage.store_results(
-            conn, network_id, network, solve_metadata, scenario_id
-        )
+        return storage.store_results(conn, network, solve_metadata, scenario_id)
 
 
-def _transform_to_comprehensive_format(
+def _transform_to_comprehensive_format(
+    pyconvexity_result: Dict[str, Any],
+) -> Dict[str, Any]:
     """
     Transform pyconvexity result to comprehensive format that includes both
     the original structure and sidecar-compatible fields.
-
+
     This ensures compatibility with existing sidecar code while providing
     a clean API for direct pyconvexity users.
-
+
     Args:
         pyconvexity_result: Result from pyconvexity solve operations
-
+
     Returns:
         Comprehensive result with both original and sidecar-compatible fields
     """
@@ -384,17 +395,16 @@ def _transform_to_comprehensive_format(pyconvexity_result: Dict[str, Any]) -> Di
         status = pyconvexity_result.get("status", "unknown")
         solve_time = pyconvexity_result.get("solve_time", 0.0)
         objective_value = pyconvexity_result.get("objective_value")
-
+
         # Extract storage stats
         storage_stats = pyconvexity_result.get("storage_stats", {})
         component_stats = storage_stats.get("component_stats", {})
         network_stats = storage_stats.get("network_stats", {})
-
+
         # Create comprehensive result that includes both formats
         comprehensive_result = {
             # Original pyconvexity format (for direct users)
             **pyconvexity_result,
-
             # Sidecar-compatible format (for backward compatibility)
             "network_statistics": {
                 "total_generation_mwh": network_stats.get("total_generation_mwh", 0.0),
@@ -405,35 +415,41 @@ def _transform_to_comprehensive_format(pyconvexity_result: Dict[str, Any]) -> Di
                 "num_generators": network_stats.get("num_generators", 0),
                 "num_loads": network_stats.get("num_loads", 0),
                 "num_lines": network_stats.get("num_lines", 0),
-                "num_links": network_stats.get("num_links", 0)
+                "num_links": network_stats.get("num_links", 0),
             },
-
             "component_storage_stats": {
                 "stored_bus_results": component_stats.get("stored_bus_results", 0),
-                "stored_generator_results": component_stats.get(
-
+                "stored_generator_results": component_stats.get(
+                    "stored_generator_results", 0
+                ),
+                "stored_unmet_load_results": component_stats.get(
+                    "stored_unmet_load_results", 0
+                ),
                 "stored_load_results": component_stats.get("stored_load_results", 0),
                 "stored_line_results": component_stats.get("stored_line_results", 0),
                 "stored_link_results": component_stats.get("stored_link_results", 0),
-                "stored_storage_unit_results": component_stats.get(
+                "stored_storage_unit_results": component_stats.get(
+                    "stored_storage_unit_results", 0
+                ),
                 "stored_store_results": component_stats.get("stored_store_results", 0),
                 "skipped_attributes": component_stats.get("skipped_attributes", 0),
-                "errors": component_stats.get("errors", 0)
+                "errors": component_stats.get("errors", 0),
             },
-
             # Additional compatibility fields
             "multi_period": pyconvexity_result.get("multi_period", False),
-            "years": pyconvexity_result.get("years", [])
+            "years": pyconvexity_result.get("years", []),
        }
-
+
         return comprehensive_result
-
+
     except Exception as e:
-        logger.error(
+        logger.error(
+            f"Failed to transform result to comprehensive format: {e}", exc_info=True
+        )
         # Return original result with error info if transformation fails
         return {
             **pyconvexity_result,
             "transformation_error": str(e),
             "network_statistics": {},
-            "component_storage_stats": {}
+            "component_storage_stats": {},
        }
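Taken together, the api.py changes remove the network_id argument from every public entry point (each database now holds a single network, selected only by scenario_id) and add an include_unmet_loads flag to solve_network. A minimal sketch of a call against the 0.4.1 signature, using a hypothetical database file name and only the parameters and result keys visible in the diff above:

    from pyconvexity.solvers.pypsa.api import solve_network

    # "model.db" is an illustrative path; scenario_id=None solves the base network.
    result = solve_network(
        db_path="model.db",
        scenario_id=None,
        solver_name="highs",
        include_unmet_loads=True,   # new parameter in 0.4.1
        return_detailed_results=True,
    )
    print(result.get("success"), result.get("objective_value"))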