pyconvexity-0.4.3-py3-none-any.whl
This diff shows the content of a publicly available package version as released to a supported registry. It is provided for informational purposes only and reflects the package as it appears in its public registry.
- pyconvexity/__init__.py +226 -0
- pyconvexity/_version.py +1 -0
- pyconvexity/core/__init__.py +60 -0
- pyconvexity/core/database.py +485 -0
- pyconvexity/core/errors.py +106 -0
- pyconvexity/core/types.py +400 -0
- pyconvexity/data/README.md +101 -0
- pyconvexity/data/__init__.py +17 -0
- pyconvexity/data/loaders/__init__.py +3 -0
- pyconvexity/data/loaders/cache.py +213 -0
- pyconvexity/data/schema/01_core_schema.sql +420 -0
- pyconvexity/data/schema/02_data_metadata.sql +120 -0
- pyconvexity/data/schema/03_validation_data.sql +506 -0
- pyconvexity/data/sources/__init__.py +5 -0
- pyconvexity/data/sources/gem.py +442 -0
- pyconvexity/io/__init__.py +26 -0
- pyconvexity/io/excel_exporter.py +1226 -0
- pyconvexity/io/excel_importer.py +1381 -0
- pyconvexity/io/netcdf_exporter.py +197 -0
- pyconvexity/io/netcdf_importer.py +1833 -0
- pyconvexity/models/__init__.py +195 -0
- pyconvexity/models/attributes.py +730 -0
- pyconvexity/models/carriers.py +159 -0
- pyconvexity/models/components.py +611 -0
- pyconvexity/models/network.py +503 -0
- pyconvexity/models/results.py +148 -0
- pyconvexity/models/scenarios.py +234 -0
- pyconvexity/solvers/__init__.py +29 -0
- pyconvexity/solvers/pypsa/__init__.py +24 -0
- pyconvexity/solvers/pypsa/api.py +460 -0
- pyconvexity/solvers/pypsa/batch_loader.py +307 -0
- pyconvexity/solvers/pypsa/builder.py +675 -0
- pyconvexity/solvers/pypsa/constraints.py +405 -0
- pyconvexity/solvers/pypsa/solver.py +1509 -0
- pyconvexity/solvers/pypsa/storage.py +2048 -0
- pyconvexity/timeseries.py +330 -0
- pyconvexity/validation/__init__.py +25 -0
- pyconvexity/validation/rules.py +312 -0
- pyconvexity-0.4.3.dist-info/METADATA +47 -0
- pyconvexity-0.4.3.dist-info/RECORD +42 -0
- pyconvexity-0.4.3.dist-info/WHEEL +5 -0
- pyconvexity-0.4.3.dist-info/top_level.txt +1 -0
@@ -0,0 +1,460 @@
"""
High-level API for PyPSA solver integration.

Provides user-friendly functions for the most common workflows.
"""

import logging
from typing import Dict, Any, Optional, Callable

from pyconvexity.core.database import database_context
from pyconvexity.solvers.pypsa.builder import NetworkBuilder
from pyconvexity.solvers.pypsa.solver import NetworkSolver
from pyconvexity.solvers.pypsa.storage import ResultStorage
from pyconvexity.solvers.pypsa.constraints import ConstraintApplicator

logger = logging.getLogger(__name__)


def solve_network(
    db_path: str,
    scenario_id: Optional[int] = None,
    solver_name: str = "highs",
    solver_options: Optional[Dict[str, Any]] = None,
    constraints_dsl: Optional[str] = None,
    discount_rate: Optional[float] = None,
    progress_callback: Optional[Callable[[int, str], None]] = None,
    return_detailed_results: bool = True,
    custom_solver_config: Optional[Dict[str, Any]] = None,
    include_unmet_loads: bool = True,
    verbose: bool = False,
) -> Dict[str, Any]:
    """
    Complete solve workflow: build PyPSA network from database, solve, store results (single network per database).

    This is the main high-level function that most users should use. It handles
    the complete workflow of loading data from database, building a PyPSA network,
    solving it, and storing results back to the database.

    Args:
        db_path: Path to the database file
        scenario_id: Optional scenario ID (NULL for base network)
        solver_name: Solver to use (default: "highs"). Use "custom" for custom_solver_config.
        solver_options: Optional solver-specific options
        constraints_dsl: Optional DSL constraints to apply
        discount_rate: Optional discount rate for multi-period optimization
        progress_callback: Optional callback for progress updates (progress: int, message: str)
        return_detailed_results: If True, return comprehensive results; if False, return simple status
        custom_solver_config: Optional custom solver configuration when solver_name="custom"
            Format: {"solver": "actual_solver_name", "solver_options": {...}}
            Example: {"solver": "gurobi", "solver_options": {"Method": 2, "Crossover": 0}}
        include_unmet_loads: Whether to include unmet load components in the network (default: True)
        verbose: Enable detailed logging output (default: False)

    Returns:
        Dictionary with solve results - comprehensive if return_detailed_results=True, simple status otherwise

    Raises:
        DatabaseError: If database operations fail
        ValidationError: If network data is invalid
        ImportError: If PyPSA is not available
    """
    if progress_callback:
        progress_callback(0, "Starting network solve...")

    with database_context(db_path) as conn:
        # Load network configuration with scenario awareness
        from pyconvexity.models import get_network_config

        network_config = get_network_config(conn, scenario_id)
        if progress_callback:
            progress_callback(8, "Loaded network configuration")

        # Use configuration values with parameter overrides
        # Note: network_config already has default of 0.0 from get_network_config()
        effective_discount_rate = (
            discount_rate
            if discount_rate is not None
            else network_config.get("discount_rate")
        )

        # Build network
        if progress_callback:
            progress_callback(10, "Building PyPSA network...")

        builder = NetworkBuilder(verbose=verbose)
        network = builder.build_network(
            conn, scenario_id, progress_callback, include_unmet_loads
        )

        if progress_callback:
            progress_callback(
                50,
                f"Network built: {len(network.buses)} buses, {len(network.generators)} generators",
            )

        # Create constraint applicator and apply constraints BEFORE solve
        constraint_applicator = ConstraintApplicator()

        # Apply constraints before solving (network modifications like GlobalConstraints)
        if progress_callback:
            progress_callback(60, "Applying constraints...")

        constraint_applicator.apply_constraints(
            conn, network, scenario_id, constraints_dsl
        )

        # Solve network
        if progress_callback:
            progress_callback(70, f"Solving with {solver_name}...")

        solver = NetworkSolver(verbose=verbose)
        solve_result = solver.solve_network(
            network,
            solver_name=solver_name,
            solver_options=solver_options,
            discount_rate=effective_discount_rate,  # Use effective discount rate from config
            conn=conn,
            scenario_id=scenario_id,
            constraint_applicator=constraint_applicator,
            custom_solver_config=custom_solver_config,
        )

        if progress_callback:
            progress_callback(85, "Storing results...")

        # Store results - ALWAYS store results regardless of return_detailed_results flag
        storage = ResultStorage(verbose=verbose)
        storage_result = storage.store_results(conn, network, solve_result, scenario_id)

        if progress_callback:
            progress_callback(95, "Solve completed successfully")

        # Optimize database after successful solve (if solve was successful)
        if solve_result.get("success", False):
            try:
                if progress_callback:
                    progress_callback(98, "Optimizing database...")

                from pyconvexity.core.database import (
                    should_optimize_database,
                    optimize_database,
                )

                # Only optimize if there's significant free space (>5% threshold for post-solve)
                if should_optimize_database(conn, free_space_threshold_percent=5.0):
                    logger.info("Running database optimization after successful solve")
                    optimization_result = optimize_database(conn)
                    logger.info(
                        f"Database optimization completed: {optimization_result['space_reclaimed']:,} bytes reclaimed"
                    )
                else:
                    logger.debug(
                        "Skipping database optimization - insufficient free space"
                    )

            except Exception as e:
                # Don't fail the solve if optimization fails
                logger.warning(f"Database optimization failed (non-critical): {e}")

        if progress_callback:
            progress_callback(100, "Complete")

        # Return simple status if requested (for sidecar/async usage)
        # Results are now stored in database regardless of this flag
        if not return_detailed_results:
            return {
                "success": solve_result.get("success", False),
                "message": (
                    "Solve completed successfully"
                    if solve_result.get("success")
                    else "Solve failed"
                ),
                "error": (
                    solve_result.get("error")
                    if not solve_result.get("success")
                    else None
                ),
                "scenario_id": scenario_id,
            }

        # Combine results in comprehensive format for detailed analysis
        comprehensive_result = {
            **solve_result,
            "storage_stats": storage_result,
            "scenario_id": scenario_id,
        }

        # Transform to include sidecar-compatible format
        return _transform_to_comprehensive_format(comprehensive_result)


def build_pypsa_network(
    db_path: str,
    scenario_id: Optional[int] = None,
    progress_callback: Optional[Callable[[int, str], None]] = None,
    verbose: bool = False,
) -> "pypsa.Network":
    """
    Build PyPSA network object from database (single network per database).

    This function loads all network data from the database and constructs
    a PyPSA Network object ready for solving or analysis. Useful when you
    want to inspect or modify the network before solving.

    Args:
        db_path: Path to the database file
        scenario_id: Optional scenario ID (NULL for base network)
        progress_callback: Optional callback for progress updates
        verbose: Enable detailed logging output (default: False)

    Returns:
        PyPSA Network object ready for solving

    Raises:
        DatabaseError: If database operations fail
        ValidationError: If network data is invalid
        ImportError: If PyPSA is not available
    """
    with database_context(db_path) as conn:
        builder = NetworkBuilder(verbose=verbose)
        return builder.build_network(conn, scenario_id, progress_callback)


def solve_pypsa_network(
    network: "pypsa.Network",
    db_path: str,
    scenario_id: Optional[int] = None,
    solver_name: str = "highs",
    solver_options: Optional[Dict[str, Any]] = None,
    discount_rate: Optional[float] = None,
    store_results: bool = True,
    progress_callback: Optional[Callable[[int, str], None]] = None,
    custom_solver_config: Optional[Dict[str, Any]] = None,
    verbose: bool = False,
) -> Dict[str, Any]:
    """
    Solve PyPSA network and optionally store results back to database (single network per database).

    This function takes an existing PyPSA network (e.g., from build_pypsa_network),
    solves it, and optionally stores the results back to the database.

    Args:
        network: PyPSA Network object to solve
        db_path: Path to the database file (needed for result storage)
        scenario_id: Optional scenario ID (NULL for base network)
        solver_name: Solver to use (default: "highs"). Use "custom" for custom_solver_config.
        solver_options: Optional solver-specific options
        discount_rate: Optional discount rate for multi-period optimization (default: 0.0)
        store_results: Whether to store results back to database (default: True)
        progress_callback: Optional callback for progress updates
        custom_solver_config: Optional custom solver configuration when solver_name="custom"
            Format: {"solver": "actual_solver_name", "solver_options": {...}}
        verbose: Enable detailed logging output (default: False)

    Returns:
        Dictionary with solve results and statistics

    Raises:
        DatabaseError: If database operations fail (when store_results=True)
        ImportError: If PyPSA is not available
    """
    if progress_callback:
        progress_callback(0, f"Solving network with {solver_name}...")

    # Solve network
    solver = NetworkSolver(verbose=verbose)
    solve_result = solver.solve_network(
        network,
        solver_name=solver_name,
        solver_options=solver_options,
        discount_rate=discount_rate,
        custom_solver_config=custom_solver_config,
    )

    if progress_callback:
        progress_callback(70, "Solve completed")

    # Store results if requested
    if store_results:
        if progress_callback:
            progress_callback(80, "Storing results...")

        with database_context(db_path) as conn:
            storage = ResultStorage(verbose=verbose)
            storage_result = storage.store_results(
                conn, network, solve_result, scenario_id
            )
            solve_result["storage_stats"] = storage_result

    if progress_callback:
        progress_callback(100, "Complete")

    return solve_result


def load_network_components(
    db_path: str, scenario_id: Optional[int] = None
) -> Dict[str, Any]:
    """
    Load all network components and attributes as structured data (single network per database).

    This low-level function loads network data without building a PyPSA network.
    Useful for analysis, validation, or building custom network representations.

    Args:
        db_path: Path to the database file
        scenario_id: Optional scenario ID (NULL for base network)

    Returns:
        Dictionary containing all network components and metadata

    Raises:
        DatabaseError: If database operations fail
    """
    with database_context(db_path) as conn:
        builder = NetworkBuilder()
        return builder.load_network_data(conn, scenario_id)


def apply_constraints(
    network: "pypsa.Network",
    db_path: str,
    scenario_id: Optional[int] = None,
    constraints_dsl: Optional[str] = None,
) -> None:
    """
    Apply custom constraints to PyPSA network (single network per database).

    This function applies database-stored constraints and optional DSL constraints
    to an existing PyPSA network. Modifies the network in-place.

    Args:
        network: PyPSA Network object to modify
        db_path: Path to the database file
        scenario_id: Optional scenario ID (NULL for base network)
        constraints_dsl: Optional DSL constraints string

    Raises:
        DatabaseError: If database operations fail
        ValidationError: If constraints are invalid
    """
    with database_context(db_path) as conn:
        constraint_applicator = ConstraintApplicator()
        constraint_applicator.apply_constraints(
            conn, network, scenario_id, constraints_dsl
        )


def store_solve_results(
    network: "pypsa.Network",
    db_path: str,
    scenario_id: Optional[int],
    solve_metadata: Dict[str, Any],
    verbose: bool = False,
) -> Dict[str, Any]:
    """
    Store PyPSA solve results back to database (single network per database).

    This low-level function stores solve results from a PyPSA network back
    to the database. Useful when you want full control over the solving process
    but still want to store results in the standard format.

    Args:
        network: Solved PyPSA Network object
        db_path: Path to the database file
        scenario_id: Scenario ID for result storage (NULL for base network)
        solve_metadata: Dictionary with solve metadata (solver_name, solve_time, etc.)
        verbose: Enable detailed logging output (default: False)

    Returns:
        Dictionary with storage statistics

    Raises:
        DatabaseError: If database operations fail
    """
    with database_context(db_path) as conn:
        storage = ResultStorage(verbose=verbose)
        return storage.store_results(conn, network, solve_metadata, scenario_id)


def _transform_to_comprehensive_format(
    pyconvexity_result: Dict[str, Any],
) -> Dict[str, Any]:
    """
    Transform pyconvexity result to comprehensive format that includes both
    the original structure and sidecar-compatible fields.

    This ensures compatibility with existing sidecar code while providing
    a clean API for direct pyconvexity users.

    Args:
        pyconvexity_result: Result from pyconvexity solve operations

    Returns:
        Comprehensive result with both original and sidecar-compatible fields
    """
    try:
        # Extract basic solve information
        success = pyconvexity_result.get("success", False)
        status = pyconvexity_result.get("status", "unknown")
        solve_time = pyconvexity_result.get("solve_time", 0.0)
        objective_value = pyconvexity_result.get("objective_value")

        # Extract storage stats
        storage_stats = pyconvexity_result.get("storage_stats", {})
        component_stats = storage_stats.get("component_stats", {})
        network_stats = storage_stats.get("network_stats", {})

        # Create comprehensive result that includes both formats
        comprehensive_result = {
            # Original pyconvexity format (for direct users)
            **pyconvexity_result,
            # Sidecar-compatible format (for backward compatibility)
            "network_statistics": {
                "total_generation_mwh": network_stats.get("total_generation_mwh", 0.0),
                "total_load_mwh": network_stats.get("total_load_mwh", 0.0),
                "unmet_load_mwh": network_stats.get("unmet_load_mwh", 0.0),
                "total_cost": network_stats.get("total_cost", objective_value or 0.0),
                "num_buses": network_stats.get("num_buses", 0),
                "num_generators": network_stats.get("num_generators", 0),
                "num_loads": network_stats.get("num_loads", 0),
                "num_lines": network_stats.get("num_lines", 0),
                "num_links": network_stats.get("num_links", 0),
            },
            "component_storage_stats": {
                "stored_bus_results": component_stats.get("stored_bus_results", 0),
                "stored_generator_results": component_stats.get(
                    "stored_generator_results", 0
                ),
                "stored_unmet_load_results": component_stats.get(
                    "stored_unmet_load_results", 0
                ),
                "stored_load_results": component_stats.get("stored_load_results", 0),
                "stored_line_results": component_stats.get("stored_line_results", 0),
                "stored_link_results": component_stats.get("stored_link_results", 0),
                "stored_storage_unit_results": component_stats.get(
                    "stored_storage_unit_results", 0
                ),
                "stored_store_results": component_stats.get("stored_store_results", 0),
                "skipped_attributes": component_stats.get("skipped_attributes", 0),
                "errors": component_stats.get("errors", 0),
            },
            # Additional compatibility fields
            "multi_period": pyconvexity_result.get("multi_period", False),
            "years": pyconvexity_result.get("years", []),
        }

        return comprehensive_result

    except Exception as e:
        logger.error(
            f"Failed to transform result to comprehensive format: {e}", exc_info=True
        )
        # Return original result with error info if transformation fails
        return {
            **pyconvexity_result,
            "transformation_error": str(e),
            "network_statistics": {},
            "component_storage_stats": {},
        }
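The module above suggests two workflows: a one-shot solve via solve_network, and a build/inspect/solve split via build_pypsa_network and solve_pypsa_network. The following sketch is illustrative only and is not part of the wheel; the database path, scenario ID, and solver choices are placeholder assumptions, while the import path, argument names, result keys, and the custom_solver_config format follow the code shown in this diff.

# Usage sketch (illustrative; "model.db" and the solver options are placeholders).
from pyconvexity.solvers.pypsa.api import (
    solve_network,
    build_pypsa_network,
    solve_pypsa_network,
)


def report(progress: int, message: str) -> None:
    # Matches the (progress: int, message: str) callback signature documented above.
    print(f"[{progress:3d}%] {message}")


# One-shot workflow: build the network from the database, solve with HiGHS,
# store results back, and receive the comprehensive result dictionary.
result = solve_network(
    "model.db",           # placeholder path to a pyconvexity database
    scenario_id=None,     # None solves the base network
    solver_name="highs",
    progress_callback=report,
)
print(result["network_statistics"]["total_cost"])

# Split workflow: build first, inspect the PyPSA network, then solve and store.
network = build_pypsa_network("model.db", verbose=True)
print(len(network.buses), "buses,", len(network.generators), "generators")
solve_pypsa_network(
    network,
    "model.db",
    solver_name="custom",
    custom_solver_config={
        "solver": "gurobi",
        "solver_options": {"Method": 2, "Crossover": 0},
    },
)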