pyconvexity-0.4.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pyconvexity might be problematic.

Files changed (44)
  1. pyconvexity/__init__.py +241 -0
  2. pyconvexity/_version.py +1 -0
  3. pyconvexity/core/__init__.py +60 -0
  4. pyconvexity/core/database.py +485 -0
  5. pyconvexity/core/errors.py +106 -0
  6. pyconvexity/core/types.py +400 -0
  7. pyconvexity/dashboard.py +265 -0
  8. pyconvexity/data/README.md +101 -0
  9. pyconvexity/data/__init__.py +17 -0
  10. pyconvexity/data/loaders/__init__.py +3 -0
  11. pyconvexity/data/loaders/cache.py +213 -0
  12. pyconvexity/data/schema/01_core_schema.sql +420 -0
  13. pyconvexity/data/schema/02_data_metadata.sql +120 -0
  14. pyconvexity/data/schema/03_validation_data.sql +507 -0
  15. pyconvexity/data/sources/__init__.py +5 -0
  16. pyconvexity/data/sources/gem.py +442 -0
  17. pyconvexity/io/__init__.py +26 -0
  18. pyconvexity/io/excel_exporter.py +1226 -0
  19. pyconvexity/io/excel_importer.py +1381 -0
  20. pyconvexity/io/netcdf_exporter.py +191 -0
  21. pyconvexity/io/netcdf_importer.py +1802 -0
  22. pyconvexity/models/__init__.py +195 -0
  23. pyconvexity/models/attributes.py +730 -0
  24. pyconvexity/models/carriers.py +159 -0
  25. pyconvexity/models/components.py +611 -0
  26. pyconvexity/models/network.py +503 -0
  27. pyconvexity/models/results.py +148 -0
  28. pyconvexity/models/scenarios.py +234 -0
  29. pyconvexity/solvers/__init__.py +29 -0
  30. pyconvexity/solvers/pypsa/__init__.py +30 -0
  31. pyconvexity/solvers/pypsa/api.py +446 -0
  32. pyconvexity/solvers/pypsa/batch_loader.py +296 -0
  33. pyconvexity/solvers/pypsa/builder.py +655 -0
  34. pyconvexity/solvers/pypsa/clearing_price.py +678 -0
  35. pyconvexity/solvers/pypsa/constraints.py +405 -0
  36. pyconvexity/solvers/pypsa/solver.py +1442 -0
  37. pyconvexity/solvers/pypsa/storage.py +2096 -0
  38. pyconvexity/timeseries.py +330 -0
  39. pyconvexity/validation/__init__.py +25 -0
  40. pyconvexity/validation/rules.py +312 -0
  41. pyconvexity-0.4.8.dist-info/METADATA +148 -0
  42. pyconvexity-0.4.8.dist-info/RECORD +44 -0
  43. pyconvexity-0.4.8.dist-info/WHEEL +5 -0
  44. pyconvexity-0.4.8.dist-info/top_level.txt +1 -0
pyconvexity/solvers/pypsa/solver.py
@@ -0,0 +1,1442 @@
+ """
+ Solving functionality for PyPSA networks.
+
+ Simplified to always use multi-period optimization for consistency.
+ """
+
+ import time
+ import uuid
+ import pandas as pd
+ import numpy as np
+ from typing import Dict, Any, Optional
+
+
+ class NetworkSolver:
+     """
+     Simplified PyPSA network solver that always uses multi-period optimization.
+
+     This ensures consistent behavior for both single-year and multi-year models.
+     """
+
+     def __init__(self, verbose: bool = False):
+         """
+         Initialize NetworkSolver.
+
+         Args:
+             verbose: Enable detailed logging output
+         """
+         self.verbose = verbose
+
+         # Import PyPSA with error handling
+         try:
+             import pypsa
+
+             self.pypsa = pypsa
+         except ImportError as e:
+             raise ImportError(
+                 "PyPSA is not installed or could not be imported. "
+                 "Please ensure it is installed correctly in the environment."
+             ) from e
+
+     def _get_user_settings_path(self):
+         """Get the path to the user settings file (same location as Tauri uses)"""
+         try:
+             import platform
+             import os
+             from pathlib import Path
+
+             system = platform.system()
+             if system == "Darwin":  # macOS
+                 home = Path.home()
+                 app_data_dir = (
+                     home / "Library" / "Application Support" / "com.convexity.desktop"
+                 )
+             elif system == "Windows":
+                 app_data_dir = (
+                     Path(os.environ.get("APPDATA", "")) / "com.convexity.desktop"
+                 )
+             else:  # Linux
+                 home = Path.home()
+                 app_data_dir = home / ".local" / "share" / "com.convexity.desktop"
+
+             settings_file = app_data_dir / "user_settings.json"
+             return settings_file if settings_file.exists() else None
+
+         except Exception as e:
+             return None
+
+     def _resolve_default_solver(self) -> str:
+         """Resolve 'default' solver to user's preferred solver"""
+         try:
+             import json
+
+             settings_path = self._get_user_settings_path()
+             if not settings_path:
+                 return "highs"
+
+             with open(settings_path, "r") as f:
+                 user_settings = json.load(f)
+
+             # Get default solver from user settings
+             default_solver = user_settings.get("default_solver", "highs")
+
+             # Validate that it's a known solver
+             known_solvers = [
+                 "highs",
+                 "gurobi",
+                 "gurobi (barrier)",
+                 "gurobi (barrier homogeneous)",
+                 "gurobi (barrier+crossover balanced)",
+                 "gurobi (dual simplex)",
+                 "mosek",
+                 "mosek (default)",
+                 "mosek (barrier)",
+                 "mosek (barrier+crossover)",
+                 "mosek (dual simplex)",
+                 "copt",
+                 "copt (barrier)",
+                 "copt (barrier homogeneous)",
+                 "copt (barrier+crossover)",
+                 "copt (dual simplex)",
+                 "copt (concurrent)",
+                 "cplex",
+                 "glpk",
+                 "cbc",
+                 "scip",
+             ]
+
+             if default_solver in known_solvers:
+                 return default_solver
+             else:
+                 return "highs"
+
+         except Exception as e:
+             return "highs"
+
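For reference, the user_settings.json read above is a small JSON document; the only key this module consumes is default_solver. A minimal example (hypothetical contents) would be:

{
    "default_solver": "gurobi (barrier)"
}

Any value outside the known_solvers list silently falls back to "highs".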
+     def solve_network(
+         self,
+         network: "pypsa.Network",
+         solver_name: str = "highs",
+         solver_options: Optional[Dict[str, Any]] = None,
+         discount_rate: Optional[float] = None,
+         job_id: Optional[str] = None,
+         conn=None,
+         scenario_id: Optional[int] = None,
+         constraint_applicator=None,
+         custom_solver_config: Optional[Dict[str, Any]] = None,
+     ) -> Dict[str, Any]:
+         """
+         Solve PyPSA network and return results.
+
+         Args:
+             network: PyPSA Network object to solve
+             solver_name: Solver to use (default: "highs"). Use "custom" for custom_solver_config.
+             solver_options: Optional solver-specific options
+             discount_rate: Optional discount rate for multi-period optimization
+             job_id: Optional job ID for tracking
+             custom_solver_config: Optional custom solver configuration when solver_name="custom"
+                 Format: {"solver": "actual_solver_name", "solver_options": {...}}
+                 Example: {"solver": "gurobi", "solver_options": {"Method": 2, "Crossover": 0}}
+
+         Returns:
+             Dictionary with solve results and metadata
+
+         Raises:
+             ImportError: If PyPSA is not available
+             Exception: If solving fails
+         """
+         start_time = time.time()
+         run_id = str(uuid.uuid4())
+
+         try:
+             # Get solver configuration
+             actual_solver_name, solver_config = self._get_solver_config(
+                 solver_name, solver_options, custom_solver_config
+             )
+
+             # Resolve discount rate - fallback to 0.0 if None
+             # Note: API layer (api.py) handles fetching from network_config before calling this
+             effective_discount_rate = (
+                 discount_rate if discount_rate is not None else 0.0
+             )
+
+             years = list(network.investment_periods)
+
+             # Calculate investment period weightings with discount rate
+             self._calculate_investment_weightings(network, effective_discount_rate)
+
+             # Set snapshot weightings after multi-period setup
+             if conn:
+                 self._set_snapshot_weightings_after_multiperiod(conn, network)
+
+             # Prepare optimization constraints - ONLY model constraints
+             # Network constraints were already applied before solve in api.py
+             extra_functionality = None
+             model_constraints = []
+
+             if conn and constraint_applicator:
+                 optimization_constraints = (
+                     constraint_applicator.get_optimization_constraints(
+                         conn, scenario_id
+                     )
+                 )
+                 if optimization_constraints:
+                     # Filter for model constraints only (network constraints already applied)
+                     for constraint in optimization_constraints:
+                         constraint_code = constraint.get("constraint_code", "")
+                         constraint_type = self._detect_constraint_type(constraint_code)
+
+                         if constraint_type == "model_constraint":
+                             model_constraints.append(constraint)
+
+             if model_constraints:
+                 extra_functionality = self._create_extra_functionality(
+                     model_constraints, constraint_applicator
+                 )
+
+             # NOTE: Model constraints are applied DURING solve via extra_functionality
+             # Network constraints were already applied to the network structure before solve
+
+             if solver_config:
+                 result = network.optimize(
+                     solver_name=actual_solver_name,
+                     multi_investment_periods=True,
+                     extra_functionality=extra_functionality,
+                     **solver_config,
+                 )
+             else:
+                 result = network.optimize(
+                     solver_name=actual_solver_name,
+                     multi_investment_periods=True,
+                     extra_functionality=extra_functionality,
+                 )
+
+             solve_time = time.time() - start_time
+
+             # Extract solve results with comprehensive statistics
+             solve_result = self._extract_solve_results(
+                 network, result, solve_time, actual_solver_name, run_id
+             )
+
+             # Calculate comprehensive network statistics (all years combined)
+             if solve_result.get("success"):
+                 network_statistics = self._calculate_comprehensive_network_statistics(
+                     network, solve_time, actual_solver_name
+                 )
+                 solve_result["network_statistics"] = network_statistics
+
+                 # Calculate year-based statistics for capacity expansion analysis
+                 year_statistics = self._calculate_statistics_by_year(
+                     network, solve_time, actual_solver_name
+                 )
+                 solve_result["year_statistics"] = year_statistics
+                 solve_result["year_statistics_available"] = len(year_statistics) > 0
+
+             return solve_result
+
+         except Exception as e:
+             solve_time = time.time() - start_time
+
+             return {
+                 "success": False,
+                 "status": "failed",
+                 "error": str(e),
+                 "solve_time": solve_time,
+                 "solver_name": (
+                     actual_solver_name
+                     if "actual_solver_name" in locals()
+                     else solver_name
+                 ),
+                 "run_id": run_id,
+                 "objective_value": None,
+             }
+
+     def _get_solver_config(
+         self,
+         solver_name: str,
+         solver_options: Optional[Dict[str, Any]] = None,
+         custom_solver_config: Optional[Dict[str, Any]] = None,
+     ) -> tuple[str, Optional[Dict[str, Any]]]:
+         """
+         Get the actual solver name and options for special solver configurations.
+
+         Args:
+             solver_name: The solver name (e.g., 'gurobi (barrier)', 'highs', 'custom')
+             solver_options: Optional additional solver options
+             custom_solver_config: Optional custom solver configuration for solver_name='custom'
+                 Format: {"solver": "actual_solver_name", "solver_options": {...}}
+
+         Returns:
+             Tuple of (actual_solver_name, solver_options_dict)
+         """
+         # Handle "custom" solver with custom configuration
+         if solver_name == "custom":
+             if not custom_solver_config:
+                 raise ValueError(
+                     "custom_solver_config must be provided when solver_name='custom'"
+                 )
+
+             if "solver" not in custom_solver_config:
+                 raise ValueError(
+                     "custom_solver_config must contain 'solver' key with the actual solver name"
+                 )
+
+             actual_solver = custom_solver_config["solver"]
+             custom_options = custom_solver_config.get("solver_options", {})
+
+             # Merge with any additional solver_options passed separately
+             if solver_options:
+                 merged_options = {
+                     "solver_options": {**custom_options, **solver_options}
+                 }
+             else:
+                 merged_options = (
+                     {"solver_options": custom_options} if custom_options else None
+                 )
+
+             return actual_solver, merged_options
+
+         # Handle "default" solver
+         if solver_name == "default":
+             # Try to read user's default solver preference
+             actual_solver = self._resolve_default_solver()
+             return actual_solver, solver_options
+
+         # Handle special Gurobi configurations
+         if solver_name == "gurobi (barrier)":
+             gurobi_barrier_options = {
+                 "solver_options": {
+                     "Method": 2,  # Barrier
+                     "Crossover": 0,  # Skip crossover
+                     "MIPGap": 0.05,  # 5% gap
+                     "Threads": 0,  # Use all cores (0 = auto)
+                     "Presolve": 2,  # Aggressive presolve
+                     "ConcurrentMIP": 1,  # Parallel root strategies
+                     "BarConvTol": 1e-4,  # Relaxed barrier convergence
+                     "FeasibilityTol": 1e-5,
+                     "OptimalityTol": 1e-5,
+                     "NumericFocus": 1,  # Improve stability
+                     "PreSparsify": 1,
+                 }
+             }
+             # Merge with any additional options
+             if solver_options:
+                 gurobi_barrier_options.update(solver_options)
+             return "gurobi", gurobi_barrier_options
+
+         elif solver_name == "gurobi (barrier homogeneous)":
+             gurobi_barrier_homogeneous_options = {
+                 "solver_options": {
+                     "Method": 2,  # Barrier
+                     "Crossover": 0,  # Skip crossover
+                     "MIPGap": 0.05,
+                     "Threads": 0,  # Use all cores (0 = auto)
+                     "Presolve": 2,
+                     "ConcurrentMIP": 1,
+                     "BarConvTol": 1e-4,
+                     "FeasibilityTol": 1e-5,
+                     "OptimalityTol": 1e-5,
+                     "NumericFocus": 1,
+                     "PreSparsify": 1,
+                     "BarHomogeneous": 1,  # Enable homogeneous barrier algorithm
+                 }
+             }
+             if solver_options:
+                 gurobi_barrier_homogeneous_options.update(solver_options)
+             return "gurobi", gurobi_barrier_homogeneous_options
+
+         elif solver_name == "gurobi (barrier+crossover balanced)":
+             gurobi_options_balanced = {
+                 "solver_options": {
+                     "Method": 2,
+                     "Crossover": 1,  # Dual crossover
+                     "MIPGap": 0.01,
+                     "Threads": 0,  # Use all cores (0 = auto)
+                     "Presolve": 2,
+                     "Heuristics": 0.1,
+                     "Cuts": 2,
+                     "ConcurrentMIP": 1,
+                     "BarConvTol": 1e-6,
+                     "FeasibilityTol": 1e-6,
+                     "OptimalityTol": 1e-6,
+                     "NumericFocus": 1,
+                     "PreSparsify": 1,
+                 }
+             }
+             if solver_options:
+                 gurobi_options_balanced.update(solver_options)
+             return "gurobi", gurobi_options_balanced
+
+         elif solver_name == "gurobi (dual simplex)":
+             gurobi_dual_options = {
+                 "solver_options": {
+                     "Method": 1,  # Dual simplex method
+                     "Threads": 0,  # Use all available cores
+                     "Presolve": 2,  # Aggressive presolve
+                 }
+             }
+             if solver_options:
+                 gurobi_dual_options.update(solver_options)
+             return "gurobi", gurobi_dual_options
+
+         # Handle special Mosek configurations
+         elif solver_name == "mosek (default)":
+             # Mostly Mosek defaults - just a MIP gap and a time limit to match the other presets
+             mosek_default_options = {
+                 "solver_options": {
+                     "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # MIP relative gap tolerance (5% to match Gurobi)
+                     "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hours
+                 }
+             }
+             if solver_options:
+                 mosek_default_options["solver_options"].update(solver_options)
+             return "mosek", mosek_default_options
+
+         elif solver_name == "mosek (barrier)":
+             mosek_barrier_options = {
+                 "solver_options": {
+                     "MSK_IPAR_INTPNT_BASIS": 0,  # Skip crossover (barrier-only) - 0 = MSK_BI_NEVER
+                     "MSK_DPAR_INTPNT_TOL_REL_GAP": 1e-4,  # Match Gurobi barrier tolerance
+                     "MSK_DPAR_INTPNT_TOL_PFEAS": 1e-5,  # Match Gurobi primal feasibility
+                     "MSK_DPAR_INTPNT_TOL_DFEAS": 1e-5,  # Match Gurobi dual feasibility
+                     # Removed MSK_DPAR_INTPNT_TOL_INFEAS - was 1000x tighter than other tolerances!
+                     "MSK_IPAR_NUM_THREADS": 0,  # Use all available cores (0 = auto)
+                     "MSK_IPAR_PRESOLVE_USE": 2,  # Aggressive presolve (match Gurobi Presolve=2)
+                     "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap
+                     "MSK_IPAR_MIO_ROOT_OPTIMIZER": 4,  # Use interior-point for MIP root
+                     "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hours
+                 }
+             }
+             if solver_options:
+                 mosek_barrier_options["solver_options"].update(solver_options)
+             return "mosek", mosek_barrier_options
+
+         elif solver_name == "mosek (barrier+crossover)":
+             mosek_barrier_crossover_options = {
+                 "solver_options": {
+                     "MSK_IPAR_INTPNT_BASIS": 1,  # Always crossover (1 = MSK_BI_ALWAYS)
+                     "MSK_DPAR_INTPNT_TOL_REL_GAP": 1e-4,  # Match Gurobi barrier tolerance (was 1e-6)
+                     "MSK_DPAR_INTPNT_TOL_PFEAS": 1e-5,  # Match Gurobi (was 1e-6)
+                     "MSK_DPAR_INTPNT_TOL_DFEAS": 1e-5,  # Match Gurobi (was 1e-6)
+                     "MSK_IPAR_NUM_THREADS": 0,  # Use all available cores (0 = auto)
+                     "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap (was 1e-6)
+                     "MSK_IPAR_MIO_ROOT_OPTIMIZER": 4,  # Use interior-point for MIP root
+                     "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hours (safety limit)
+                 }
+             }
+             if solver_options:
+                 mosek_barrier_crossover_options["solver_options"].update(solver_options)
+             return "mosek", mosek_barrier_crossover_options
+
+         elif solver_name == "mosek (dual simplex)":
+             mosek_dual_options = {
+                 "solver_options": {
+                     "MSK_IPAR_NUM_THREADS": 0,  # Use all available cores (0 = automatic)
+                     "MSK_IPAR_PRESOLVE_USE": 1,  # Force presolve
+                     "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap (was 1e-6)
+                     "MSK_IPAR_MIO_ROOT_OPTIMIZER": 1,  # Use dual simplex for MIP root
+                     "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hours (safety limit)
+                 }
+             }
+             if solver_options:
+                 mosek_dual_options["solver_options"].update(solver_options)
+             return "mosek", mosek_dual_options
+
+         # Check if this is a known valid solver name
+         elif solver_name == "mosek":
+             # Add default MILP-friendly settings for plain Mosek
+             mosek_defaults = {
+                 "solver_options": {
+                     "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap (was 1e-4)
+                     "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hours
+                     "MSK_IPAR_NUM_THREADS": 0,  # Use all cores (0 = auto)
+                 }
+             }
+             if solver_options:
+                 mosek_defaults["solver_options"].update(solver_options)
+             return solver_name, mosek_defaults
+
+         elif solver_name == "gurobi":
+             # Add default MILP-friendly settings for plain Gurobi (for consistency)
+             gurobi_defaults = {
+                 "solver_options": {
+                     "MIPGap": 1e-4,  # 0.01% gap
+                     "TimeLimit": 3600,  # 1 hour
+                     "Threads": 0,  # Use all cores
+                     "OutputFlag": 1,  # Enable output
+                 }
+             }
+             if solver_options:
+                 gurobi_defaults["solver_options"].update(solver_options)
+             return solver_name, gurobi_defaults
+
+         # Handle special COPT configurations
+         elif solver_name == "copt (barrier)":
+             copt_barrier_options = {
+                 "solver_options": {
+                     "LpMethod": 2,  # Barrier method
+                     "Crossover": 0,  # Skip crossover for speed
+                     "RelGap": 0.05,  # 5% MIP gap (match Gurobi)
+                     "TimeLimit": 7200,  # 2 hour time limit
+                     "Threads": -1,  # -1 = automatic (all available cores)
+                     "Presolve": 3,  # Aggressive presolve
+                     "Scaling": 1,  # Enable scaling
+                     "FeasTol": 1e-5,  # Match Gurobi feasibility
+                     "DualTol": 1e-5,  # Match Gurobi dual tolerance
+                     # MIP performance settings
+                     "CutLevel": 2,  # Normal cut generation
+                     "HeurLevel": 3,  # Aggressive heuristics
+                     "StrongBranching": 1,  # Fast strong branching
+                 }
+             }
+             if solver_options:
+                 copt_barrier_options["solver_options"].update(solver_options)
+             return "copt", copt_barrier_options
+
+         elif solver_name == "copt (barrier homogeneous)":
+             copt_barrier_homogeneous_options = {
+                 "solver_options": {
+                     "LpMethod": 2,  # Barrier method
+                     "Crossover": 0,  # Skip crossover
+                     "BarHomogeneous": 1,  # Use homogeneous self-dual form
+                     "RelGap": 0.05,  # 5% MIP gap
+                     "TimeLimit": 3600,  # 1 hour
+                     "Threads": -1,  # -1 = automatic (all available cores)
+                     "Presolve": 3,  # Aggressive presolve
+                     "Scaling": 1,  # Enable scaling
+                     "FeasTol": 1e-5,
+                     "DualTol": 1e-5,
+                     # MIP performance settings
+                     "CutLevel": 2,  # Normal cuts
+                     "HeurLevel": 3,  # Aggressive heuristics
+                     "StrongBranching": 1,  # Fast strong branching
+                 }
+             }
+             if solver_options:
+                 copt_barrier_homogeneous_options["solver_options"].update(
+                     solver_options
+                 )
+             return "copt", copt_barrier_homogeneous_options
+
+         elif solver_name == "copt (barrier+crossover)":
+             copt_barrier_crossover_options = {
+                 "solver_options": {
+                     "LpMethod": 2,  # Barrier method
+                     "Crossover": 1,  # Enable crossover for better solutions
+                     "RelGap": 0.05,  # 5% MIP gap (relaxed for faster solves)
+                     "TimeLimit": 36000,  # 10 hours
+                     "Threads": -1,  # Use all cores
+                     "Presolve": 2,  # Enable presolve
+                     "Scaling": 1,  # Enable scaling
+                     "FeasTol": 1e-4,  # Relaxed feasibility
+                     "DualTol": 1e-4,  # Relaxed dual tolerance
+                 }
+             }
+             if solver_options:
+                 copt_barrier_crossover_options["solver_options"].update(solver_options)
+             return "copt", copt_barrier_crossover_options
+
+         elif solver_name == "copt (dual simplex)":
+             copt_dual_simplex_options = {
+                 "solver_options": {
+                     "LpMethod": 1,  # Dual simplex method
+                     "RelGap": 0.05,  # 5% MIP gap
+                     "TimeLimit": 3600,  # 1 hour
+                     "Threads": -1,  # Use all cores
+                     "Presolve": 3,  # Aggressive presolve
+                     "Scaling": 1,  # Enable scaling
+                     "FeasTol": 1e-6,
+                     "DualTol": 1e-6,
+                     # MIP performance settings
+                     "CutLevel": 2,  # Normal cuts
+                     "HeurLevel": 2,  # Normal heuristics
+                     "StrongBranching": 1,  # Fast strong branching
+                 }
+             }
+             if solver_options:
+                 copt_dual_simplex_options["solver_options"].update(solver_options)
+             return "copt", copt_dual_simplex_options
+
+         elif solver_name == "copt (concurrent)":
+             copt_concurrent_options = {
+                 "solver_options": {
+                     "LpMethod": 4,  # Concurrent (simplex + barrier)
+                     "RelGap": 0.05,  # 5% MIP gap
+                     "TimeLimit": 3600,  # 1 hour
+                     "Threads": -1,  # Use all cores
+                     "Presolve": 3,  # Aggressive presolve
+                     "Scaling": 1,  # Enable scaling
+                     "FeasTol": 1e-5,
+                     "DualTol": 1e-5,
+                     # MIP performance settings
+                     "CutLevel": 2,  # Normal cuts
+                     "HeurLevel": 3,  # Aggressive heuristics
+                     "StrongBranching": 1,  # Fast strong branching
+                 }
+             }
+             if solver_options:
+                 copt_concurrent_options["solver_options"].update(solver_options)
+             return "copt", copt_concurrent_options
+
+         elif solver_name in ["highs", "cplex", "glpk", "cbc", "scip", "copt"]:
+             return solver_name, solver_options
+
+         else:
+             # Unknown solver name - fall back to highs
+             return "highs", solver_options
+
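To illustrate the dispatch above (a sketch, not part of the package): preset names expand to a concrete solver plus a solver_options dict, and unrecognized names fall back to HiGHS.

solver = NetworkSolver()
name, config = solver._get_solver_config("gurobi (barrier)")
# -> ("gurobi", {"solver_options": {"Method": 2, "Crossover": 0, ...}})
name, config = solver._get_solver_config("no-such-solver")
# -> ("highs", None)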
+     def _detect_constraint_type(self, constraint_code: str) -> str:
+         """
+         Detect if constraint is network-modification or model-constraint type.
+
+         Args:
+             constraint_code: The constraint code to analyze
+
+         Returns:
+             "model_constraint" or "network_modification"
+         """
+         # Type 2 indicators (model constraints) - need access to optimization model
+         model_indicators = [
+             "n.optimize.create_model()",
+             "m.variables",
+             "m.add_constraints",
+             "gen_p =",
+             "constraint_expr =",
+             "LinearExpression",
+             "linopy",
+             "Generator-p",
+             "lhs <=",
+             "constraint_expr =",
+         ]
+
+         # Type 1 indicators (network modifications) - modify network directly
+         network_indicators = [
+             "n.generators.loc",
+             "n.add(",
+             "n.buses.",
+             "n.lines.",
+             "network.generators.loc",
+             "network.add(",
+             "network.buses.",
+             "network.lines.",
+         ]
+
+         # Check for model constraint indicators first (more specific)
+         if any(indicator in constraint_code for indicator in model_indicators):
+             return "model_constraint"
+         elif any(indicator in constraint_code for indicator in network_indicators):
+             return "network_modification"
+         else:
+             # Default to network_modification for safety (existing behavior)
+             return "network_modification"
+
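Illustrative classifications for the heuristic above (hypothetical constraint snippets):

solver._detect_constraint_type("m = n.optimize.create_model()\nm.add_constraints(...)")
# -> "model_constraint" (touches the linopy model)
solver._detect_constraint_type("n.generators.loc['gas_1', 'p_nom_max'] = 500")
# -> "network_modification" (edits network data directly)
solver._detect_constraint_type("print('hello')")
# -> "network_modification" (no match; defaults to the safe case)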
+     def _create_extra_functionality(
+         self, optimization_constraints: list, constraint_applicator
+     ) -> callable:
+         """
+         Create extra_functionality function for optimization-time constraints.
+
+         This matches the old PyPSA solver's approach to applying constraints during optimization.
+
+         Args:
+             optimization_constraints: List of optimization constraint dictionaries
+             constraint_applicator: ConstraintApplicator instance
+
+         Returns:
+             Function that can be passed to network.optimize(extra_functionality=...)
+         """
+
+         def extra_functionality(network, snapshots):
+             """Apply optimization constraints during solve - matches old code structure"""
+             try:
+                 # Apply each constraint in priority order
+                 sorted_constraints = sorted(
+                     optimization_constraints, key=lambda x: x.get("priority", 0)
+                 )
+
+                 for constraint in sorted_constraints:
+                     try:
+                         constraint_applicator.apply_optimization_constraint(
+                             network, snapshots, constraint
+                         )
+                     except Exception as e:
+                         continue
+
+             except Exception as e:
+                 pass  # Don't re-raise - let optimization continue
+
+         return extra_functionality
+
+     def _set_snapshot_weightings_after_multiperiod(
+         self, conn, network: "pypsa.Network"
+     ):
+         """Set snapshot weightings AFTER multi-period setup - matches old code approach (single network per database)."""
+         try:
+             from pyconvexity.models import get_network_time_periods, get_network_info
+
+             time_periods = get_network_time_periods(conn)
+             if time_periods and len(network.snapshots) > 0:
+                 # Get network info to determine time interval
+                 network_info = get_network_info(conn)
+                 time_interval = network_info.get("time_interval", "1H")
+                 weight = self._parse_time_interval(time_interval)
+
+                 if weight is None:
+                     weight = 1.0
+
+                 # Create weightings array - all snapshots get the same weight for this time resolution
+                 weightings = [weight] * len(time_periods)
+
+                 if len(weightings) == len(network.snapshots):
+                     # Set all three columns like the old code - critical for proper objective calculation
+                     network.snapshot_weightings.loc[:, "objective"] = weightings
+                     network.snapshot_weightings.loc[:, "generators"] = weightings
+                     network.snapshot_weightings.loc[:, "stores"] = weightings
+         except Exception as e:
+             pass  # Failed to set snapshot weightings
+
+     def _parse_time_interval(self, time_interval: str) -> Optional[float]:
+         """Parse time interval string to hours - handles multiple formats."""
+         if not time_interval:
+             return None
+
+         try:
+             # Clean up the string
+             interval = time_interval.strip()
+
+             # Handle ISO 8601 duration format (PT3H, PT30M, etc.)
+             if interval.startswith("PT") and interval.endswith("H"):
+                 # Extract hours (e.g., 'PT3H' -> 3.0)
+                 hours_str = interval[2:-1]  # Remove 'PT' and 'H'
+                 return float(hours_str)
+             elif interval.startswith("PT") and interval.endswith("M"):
+                 # Extract minutes (e.g., 'PT30M' -> 0.5)
+                 minutes_str = interval[2:-1]  # Remove 'PT' and 'M'
+                 return float(minutes_str) / 60.0
+             elif interval.startswith("PT") and interval.endswith("S"):
+                 # Extract seconds (e.g., 'PT3600S' -> 1.0)
+                 seconds_str = interval[2:-1]  # Remove 'PT' and 'S'
+                 return float(seconds_str) / 3600.0
+
+             # Handle simple frequency strings (3H, 2D, etc.)
+             elif interval.endswith("H") or interval.endswith("h"):
+                 hours_str = interval[:-1]
+                 return float(hours_str) if hours_str else 1.0
+             elif interval.endswith("D") or interval.endswith("d"):
+                 days_str = interval[:-1]
+                 return float(days_str) * 24 if days_str else 24.0
+             elif interval.endswith("M") or interval.endswith("m"):
+                 minutes_str = interval[:-1]
+                 return float(minutes_str) / 60.0 if minutes_str else 1.0 / 60.0
+             elif interval.endswith("S") or interval.endswith("s"):
+                 seconds_str = interval[:-1]
+                 return float(seconds_str) / 3600.0 if seconds_str else 1.0 / 3600.0
+
+             # Try to parse as plain number (assume hours)
+             else:
+                 return float(interval)
+
+         except (ValueError, TypeError) as e:
+             return None
+
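Example inputs and outputs for the parser above (hours returned as floats):

solver._parse_time_interval("PT3H")   # -> 3.0
solver._parse_time_interval("PT30M")  # -> 0.5
solver._parse_time_interval("2D")     # -> 48.0
solver._parse_time_interval("0.25")   # -> 0.25 (bare numbers are treated as hours)
solver._parse_time_interval("bad")    # -> None (the caller falls back to 1.0)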
+     def _calculate_investment_weightings(
+         self, network: "pypsa.Network", discount_rate: float
+     ) -> None:
+         """
+         Calculate investment period weightings using discount rate - matching old PyPSA solver exactly.
+
+         Args:
+             network: PyPSA Network object
+             discount_rate: Discount rate for NPV calculations
+         """
+         try:
+             import pandas as pd
+
+             if (
+                 not hasattr(network, "investment_periods")
+                 or len(network.investment_periods) == 0
+             ):
+                 return
+
+             years = network.investment_periods
+             # Convert pandas Index to list for easier handling
+             years_list = years.tolist() if hasattr(years, "tolist") else list(years)
+
+             # For single year, use simple weighting of 1.0
+             if len(years_list) == 1:
+                 # Single year case
+                 network.investment_period_weightings = pd.DataFrame(
+                     {
+                         "objective": pd.Series({years_list[0]: 1.0}),
+                         "years": pd.Series({years_list[0]: 1}),
+                     }
+                 )
+             else:
+                 # Multi-year case - EXACTLY match old code logic
+                 # Get unique years from the network snapshots to determine period lengths
+                 if hasattr(network.snapshots, "year"):
+                     snapshot_years = sorted(network.snapshots.year.unique())
+                 elif hasattr(network.snapshots, "get_level_values"):
+                     # MultiIndex case - get years from 'period' level
+                     snapshot_years = sorted(
+                         network.snapshots.get_level_values("period").unique()
+                     )
+                 else:
+                     # Fallback: use investment periods as years
+                     snapshot_years = years_list
+
+                 # Calculate years per period - EXACTLY matching old code
+                 years_diff = []
+                 for i, year in enumerate(years_list):
+                     if i < len(years_list) - 1:
+                         # Years between this period and the next
+                         next_year = years_list[i + 1]
+                         period_years = next_year - year
+                     else:
+                         # For the last period, calculate based on snapshot coverage
+                         if snapshot_years:
+                             # Find the last snapshot year that's >= current period year
+                             last_snapshot_year = max(
+                                 [y for y in snapshot_years if y >= year]
+                             )
+                             period_years = last_snapshot_year - year + 1
+                         else:
+                             # Fallback: assume same length as previous period or 1
+                             if len(years_diff) > 0:
+                                 period_years = years_diff[-1]  # Same as previous period
+                             else:
+                                 period_years = 1
+
+                     years_diff.append(period_years)
+
+                 # Create weightings DataFrame with years column
+                 weightings_df = pd.DataFrame(
+                     {"years": pd.Series(years_diff, index=years_list)}
+                 )
+
+                 # Calculate objective weightings with discount rate - EXACTLY matching old code
+                 r = discount_rate
+                 T = 0  # Cumulative time tracker
+
+                 for period, nyears in weightings_df.years.items():
+                     # Calculate discount factors for each year in this period
+                     discounts = [(1 / (1 + r) ** t) for t in range(T, T + nyears)]
+                     period_weighting = sum(discounts)
+                     weightings_df.at[period, "objective"] = period_weighting
+                     T += nyears  # Update cumulative time
+
+                 network.investment_period_weightings = weightings_df
+
+         except Exception as e:
+             pass  # Failed to calculate investment weightings
+
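A worked example of the scheme above: with investment_periods [2030, 2040], snapshots running through 2049, and discount_rate = 0.05, the first period spans 10 years (t = 0..9) and the second 10 years (t = 10..19), giving:

weightings_df.loc[2030]  # years = 10, objective = sum(1 / 1.05**t for t in range(0, 10))  ≈ 8.11
weightings_df.loc[2040]  # years = 10, objective = sum(1 / 1.05**t for t in range(10, 20)) ≈ 4.98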
+     def _extract_solve_results(
+         self,
+         network: "pypsa.Network",
+         result: Any,
+         solve_time: float,
+         solver_name: str,
+         run_id: str,
+     ) -> Dict[str, Any]:
+         """
+         Extract solve results from PyPSA network.
+
+         Args:
+             network: Solved PyPSA Network object
+             result: PyPSA solve result
+             solve_time: Time taken to solve
+             solver_name: Name of solver used
+             run_id: Unique run identifier
+
+         Returns:
+             Dictionary with solve results and metadata
+         """
+         try:
+             # Extract basic solve information
+             status = getattr(result, "status", "unknown")
+             objective_value = getattr(network, "objective", None)
+
+             # Convert PyPSA result to dictionary format
+             result_dict = self._convert_pypsa_result_to_dict(result)
+
+             # Determine success based on multiple criteria
+             success = self._determine_solve_success(
+                 result, network, status, objective_value
+             )
+
+             solve_result = {
+                 "success": success,
+                 "status": status,
+                 "solve_time": solve_time,
+                 "solver_name": solver_name,
+                 "run_id": run_id,
+                 "objective_value": objective_value,
+                 "pypsa_result": result_dict,
+                 "network_name": network.name,
+                 "num_buses": len(network.buses),
+                 "num_generators": len(network.generators),
+                 "num_loads": len(network.loads),
+                 "num_lines": len(network.lines),
+                 "num_links": len(network.links),
+                 "num_snapshots": len(network.snapshots),
+             }
+
+             # Add multi-period information if available
+             if hasattr(network, "_available_years") and network._available_years:
+                 solve_result["years"] = network._available_years
+                 solve_result["multi_period"] = len(network._available_years) > 1
+
+             return solve_result
+
+         except Exception as e:
+             return {
+                 "success": False,
+                 "status": "extraction_failed",
+                 "error": f"Failed to extract results: {e}",
+                 "solve_time": solve_time,
+                 "solver_name": solver_name,
+                 "run_id": run_id,
+                 "objective_value": None,
+             }
+
+     def _determine_solve_success(
+         self,
+         result: Any,
+         network: "pypsa.Network",
+         status: str,
+         objective_value: Optional[float],
+     ) -> bool:
+         """
+         Determine if solve was successful based on multiple criteria.
+
+         PyPSA sometimes returns status='unknown' even for successful solves,
+         so we need to check multiple indicators.
+         """
+         try:
+             # Check explicit status first
+             if status in ["optimal", "feasible"]:
+                 return True
+
+             # Check termination condition
+             if hasattr(result, "termination_condition"):
+                 term_condition = str(result.termination_condition).lower()
+                 if "optimal" in term_condition:
+                     return True
+
+             # Check if we have a valid objective value
+             if objective_value is not None and not (
+                 objective_value == 0 and status == "unknown"
+             ):
+                 return True
+
+             # Check solver-specific success indicators
+             if hasattr(result, "solver"):
+                 solver_info = result.solver
+                 if hasattr(solver_info, "termination_condition"):
+                     term_condition = str(solver_info.termination_condition).lower()
+                     if "optimal" in term_condition:
+                         return True
+
+             return False
+
+         except Exception as e:
+             return False
+
+     def _convert_pypsa_result_to_dict(self, result) -> Dict[str, Any]:
+         """
+         Convert PyPSA result object to dictionary.
+
+         Args:
+             result: PyPSA solve result object
+
+         Returns:
+             Dictionary representation of the result
+         """
+         try:
+             if result is None:
+                 return {"status": "no_result"}
+
+             result_dict = {}
+
+             # Extract common attributes
+             for attr in ["status", "success", "termination_condition", "solver"]:
+                 if hasattr(result, attr):
+                     value = getattr(result, attr)
+                     # Convert to serializable format
+                     if hasattr(value, "__dict__"):
+                         result_dict[attr] = str(value)
+                     else:
+                         result_dict[attr] = value
+
+             # Handle solver-specific information
+             if hasattr(result, "solver_results"):
+                 solver_results = getattr(result, "solver_results")
+                 if hasattr(solver_results, "__dict__"):
+                     result_dict["solver_results"] = str(solver_results)
+                 else:
+                     result_dict["solver_results"] = solver_results
+
+             return result_dict
+
+         except Exception as e:
+             return {"status": "conversion_failed", "error": str(e)}
+
+     def _calculate_comprehensive_network_statistics(
+         self, network: "pypsa.Network", solve_time: float, solver_name: str
+     ) -> Dict[str, Any]:
+         """Calculate comprehensive network statistics including PyPSA statistics and custom metrics"""
+         try:
+             # Initialize statistics structure
+             statistics = {
+                 "core_summary": {},
+                 "pypsa_statistics": {},
+                 "custom_statistics": {},
+                 "runtime_info": {},
+                 "solver_info": {},
+             }
+
+             # Core summary statistics
+             total_generation = 0
+             total_demand = 0
+             unserved_energy = 0
+
+             # Calculate generation statistics
+             if hasattr(network, "generators_t") and hasattr(network.generators_t, "p"):
+                 # Apply snapshot weightings to convert MW to MWh
+                 weightings = network.snapshot_weightings
+                 if isinstance(weightings, pd.DataFrame):
+                     if "objective" in weightings.columns:
+                         weighting_values = weightings["objective"].values
+                     else:
+                         weighting_values = weightings.iloc[:, 0].values
+                 else:
+                     weighting_values = weightings.values
+
+                 total_generation = float(
+                     (network.generators_t.p.values * weighting_values[:, None]).sum()
+                 )
+
+                 # Calculate unserved energy from UNMET_LOAD generators
+                 if hasattr(network, "generators") and hasattr(
+                     network, "_component_type_map"
+                 ):
+                     unmet_load_gen_names = [
+                         name
+                         for name, comp_type in network._component_type_map.items()
+                         if comp_type == "UNMET_LOAD"
+                     ]
+
+                     for gen_name in unmet_load_gen_names:
+                         if gen_name in network.generators_t.p.columns:
+                             gen_output = float(
+                                 (
+                                     network.generators_t.p[gen_name] * weighting_values
+                                 ).sum()
+                             )
+                             unserved_energy += gen_output
+
+             # Calculate demand statistics
+             if hasattr(network, "loads_t") and hasattr(network.loads_t, "p"):
+                 weightings = network.snapshot_weightings
+                 if isinstance(weightings, pd.DataFrame):
+                     if "objective" in weightings.columns:
+                         weighting_values = weightings["objective"].values
+                     else:
+                         weighting_values = weightings.iloc[:, 0].values
+                 else:
+                     weighting_values = weightings.values
+
+                 total_demand = float(
+                     (network.loads_t.p.values * weighting_values[:, None]).sum()
+                 )
+
+             statistics["core_summary"] = {
+                 "total_generation_mwh": total_generation,
+                 "total_demand_mwh": total_demand,
+                 "total_cost": (
+                     float(network.objective) if hasattr(network, "objective") else None
+                 ),
+                 "load_factor": (
+                     (total_demand / (total_generation + 1e-6))
+                     if total_generation > 0
+                     else 0
+                 ),
+                 "unserved_energy_mwh": unserved_energy,
+             }
+
+             # Calculate PyPSA statistics
+             try:
+                 pypsa_stats = network.statistics()
+                 if pypsa_stats is not None and not pypsa_stats.empty:
+                     statistics["pypsa_statistics"] = self._convert_pypsa_result_to_dict(
+                         pypsa_stats
+                     )
+                 else:
+                     statistics["pypsa_statistics"] = {}
+             except Exception as e:
+                 statistics["pypsa_statistics"] = {}
+
+             # Custom statistics - calculate detailed breakdowns
+             total_cost = (
+                 float(network.objective) if hasattr(network, "objective") else 0.0
+             )
+             avg_price = (
+                 (total_cost / (total_generation + 1e-6))
+                 if total_generation > 0
+                 else None
+             )
+             unmet_load_percentage = (
+                 (unserved_energy / (total_demand + 1e-6)) * 100
+                 if total_demand > 0
+                 else 0
+             )
+
+             # Note: For solver statistics, we keep a simplified approach since this is just for logging
+             # The storage module will calculate proper totals from carrier statistics
+             statistics["custom_statistics"] = {
+                 "total_capital_cost": 0.0,  # Will be calculated properly in storage module
+                 "total_operational_cost": total_cost,  # PyPSA objective (includes both capital and operational, discounted)
+                 "total_currency_cost": total_cost,
+                 "total_emissions_tons_co2": 0.0,  # Will be calculated properly in storage module
+                 "average_price_per_mwh": avg_price,
+                 "unmet_load_percentage": unmet_load_percentage,
+                 "max_unmet_load_hour_mw": 0.0,  # TODO: Calculate max hourly unmet load
+             }
+
+             # Runtime info
+             unmet_load_count = 0
+             if hasattr(network, "_component_type_map"):
+                 unmet_load_count = len(
+                     [
+                         name
+                         for name, comp_type in network._component_type_map.items()
+                         if comp_type == "UNMET_LOAD"
+                     ]
+                 )
+
+             statistics["runtime_info"] = {
+                 "solve_time_seconds": solve_time,
+                 "component_count": (
+                     (
+                         len(network.buses)
+                         + len(network.generators)
+                         + len(network.loads)
+                         + len(network.lines)
+                         + len(network.links)
+                     )
+                     if hasattr(network, "buses")
+                     else 0
+                 ),
+                 "bus_count": len(network.buses) if hasattr(network, "buses") else 0,
+                 "generator_count": (
+                     len(network.generators) if hasattr(network, "generators") else 0
+                 ),
+                 "unmet_load_count": unmet_load_count,
+                 "load_count": len(network.loads) if hasattr(network, "loads") else 0,
+                 "line_count": len(network.lines) if hasattr(network, "lines") else 0,
+                 "snapshot_count": (
+                     len(network.snapshots) if hasattr(network, "snapshots") else 0
+                 ),
+             }
+
+             # Solver info
+             statistics["solver_info"] = {
+                 "solver_name": solver_name,
+                 "termination_condition": (
+                     "optimal" if hasattr(network, "objective") else "unknown"
+                 ),
+                 "objective_value": (
+                     float(network.objective) if hasattr(network, "objective") else None
+                 ),
+             }
+
+             return statistics
+
+         except Exception as e:
+             return {
+                 "error": str(e),
+                 "core_summary": {},
+                 "pypsa_statistics": {},
+                 "custom_statistics": {},
+                 "runtime_info": {"solve_time_seconds": solve_time},
+                 "solver_info": {"solver_name": solver_name},
+             }
+
+     def _calculate_statistics_by_year(
+         self, network: "pypsa.Network", solve_time: float, solver_name: str
+     ) -> Dict[int, Dict[str, Any]]:
+         """Calculate statistics for each year in the network"""
+         try:
+             # Extract years from network snapshots or manually extracted years
+             if hasattr(network.snapshots, "year"):
+                 years = sorted(network.snapshots.year.unique())
+             elif hasattr(network, "_available_years"):
+                 years = network._available_years
+             elif hasattr(network.snapshots, "levels"):
+                 # Multi-period optimization - get years from period level
+                 period_values = network.snapshots.get_level_values(0)
+                 years = sorted(period_values.unique())
+             else:
+                 # If no year info, skip year-based calculations
+                 return {}
+
+             year_statistics = {}
+
+             for year in years:
+                 try:
+                     year_stats = self._calculate_network_statistics_for_year(
+                         network, year, solve_time, solver_name
+                     )
+                     year_statistics[year] = year_stats
+                 except Exception as e:
+                     continue
+
+             return year_statistics
+
+         except Exception as e:
+             return {}
+
+     def _calculate_network_statistics_for_year(
+         self, network: "pypsa.Network", year: int, solve_time: float, solver_name: str
+     ) -> Dict[str, Any]:
+         """Calculate network statistics for a specific year"""
+         try:
+             # Initialize statistics structure
+             statistics = {
+                 "core_summary": {},
+                 "custom_statistics": {},
+                 "runtime_info": {},
+                 "solver_info": {},
+             }
+
+             # Core summary statistics for this year
+             total_generation = 0
+             total_demand = 0
+             unserved_energy = 0
+
+             # Calculate generation statistics for this year
+             if hasattr(network, "generators_t") and hasattr(network.generators_t, "p"):
+                 # Filter by year
+                 year_generation = self._filter_timeseries_by_year(
+                     network.generators_t.p, network.snapshots, year
+                 )
+                 if year_generation is not None and not year_generation.empty:
+                     # Apply snapshot weightings for this year
+                     year_weightings = self._get_year_weightings(network, year)
+                     if year_weightings is not None:
+                         total_generation = float(
+                             (year_generation.values * year_weightings[:, None]).sum()
+                         )
+                     else:
+                         total_generation = float(year_generation.sum().sum())
+
+                     # Calculate unserved energy for this year
+                     if hasattr(network, "_component_type_map"):
+                         unmet_load_gen_names = [
+                             name
+                             for name, comp_type in network._component_type_map.items()
+                             if comp_type == "UNMET_LOAD"
+                         ]
+
+                         for gen_name in unmet_load_gen_names:
+                             if gen_name in year_generation.columns:
+                                 if year_weightings is not None:
+                                     gen_output = float(
+                                         (
+                                             year_generation[gen_name] * year_weightings
+                                         ).sum()
+                                     )
+                                 else:
+                                     gen_output = float(year_generation[gen_name].sum())
+                                 unserved_energy += gen_output
+
+             # Calculate demand statistics for this year
+             if hasattr(network, "loads_t") and hasattr(network.loads_t, "p"):
+                 year_demand = self._filter_timeseries_by_year(
+                     network.loads_t.p, network.snapshots, year
+                 )
+                 if year_demand is not None and not year_demand.empty:
+                     year_weightings = self._get_year_weightings(network, year)
+                     if year_weightings is not None:
+                         total_demand = float(
+                             (year_demand.values * year_weightings[:, None]).sum()
+                         )
+                     else:
+                         total_demand = float(year_demand.sum().sum())
+
+             statistics["core_summary"] = {
+                 "total_generation_mwh": total_generation,
+                 "total_demand_mwh": total_demand,
+                 "total_cost": None,  # Year-specific cost calculation would be complex
+                 "load_factor": (
+                     (total_demand / (total_generation + 1e-6))
+                     if total_generation > 0
+                     else 0
+                 ),
+                 "unserved_energy_mwh": unserved_energy,
+             }
+
+             # Custom statistics
+             unmet_load_percentage = (
+                 (unserved_energy / (total_demand + 1e-6)) * 100
+                 if total_demand > 0
+                 else 0
+             )
+
+             # Calculate year-specific carrier statistics
+             year_carrier_stats = self._calculate_year_carrier_statistics(network, year)
+
+             statistics["custom_statistics"] = {
+                 "unmet_load_percentage": unmet_load_percentage,
+                 "year": year,
+                 **year_carrier_stats,  # Include all carrier-specific statistics for this year
+             }
+
+             # Runtime info
+             year_snapshot_count = self._count_year_snapshots(network.snapshots, year)
+
+             statistics["runtime_info"] = {
+                 "solve_time_seconds": solve_time,
+                 "year": year,
+                 "snapshot_count": year_snapshot_count,
+             }
+
+             # Solver info
+             statistics["solver_info"] = {"solver_name": solver_name, "year": year}
+
+             return statistics
+
+         except Exception as e:
+             return {
+                 "error": str(e),
+                 "core_summary": {},
+                 "custom_statistics": {"year": year},
+                 "runtime_info": {"solve_time_seconds": solve_time, "year": year},
+                 "solver_info": {"solver_name": solver_name, "year": year},
+             }
+
+     def _filter_timeseries_by_year(
+         self, timeseries_df: "pd.DataFrame", snapshots: "pd.Index", year: int
+     ) -> "pd.DataFrame":
+         """Filter timeseries data by year"""
+         try:
+             # Handle MultiIndex case (multi-period optimization)
+             if hasattr(snapshots, "levels"):
+                 period_values = snapshots.get_level_values(0)
+                 year_mask = period_values == year
+                 if year_mask.any():
+                     year_snapshots = snapshots[year_mask]
+                     return timeseries_df.loc[year_snapshots]
+
+             # Handle DatetimeIndex case (regular time series)
+             elif hasattr(snapshots, "year"):
+                 year_mask = snapshots.year == year
+                 if year_mask.any():
+                     return timeseries_df.loc[year_mask]
+
+             # Fallback - return None if can't filter
+             return None
+
+         except Exception as e:
+             return None
+
+     def _get_year_weightings(self, network: "pypsa.Network", year: int) -> "np.ndarray":
+         """Get snapshot weightings for a specific year"""
+         try:
+             # Filter snapshot weightings by year
+             if hasattr(network.snapshots, "levels"):
+                 period_values = network.snapshots.get_level_values(0)
+                 year_mask = period_values == year
+                 if year_mask.any():
+                     year_snapshots = network.snapshots[year_mask]
+                     year_weightings = network.snapshot_weightings.loc[year_snapshots]
+                     if isinstance(year_weightings, pd.DataFrame):
+                         if "objective" in year_weightings.columns:
+                             return year_weightings["objective"].values
+                         else:
+                             return year_weightings.iloc[:, 0].values
+                     else:
+                         return year_weightings.values
+
+             elif hasattr(network.snapshots, "year"):
+                 year_mask = network.snapshots.year == year
+                 if year_mask.any():
+                     year_weightings = network.snapshot_weightings.loc[year_mask]
+                     if isinstance(year_weightings, pd.DataFrame):
+                         if "objective" in year_weightings.columns:
+                             return year_weightings["objective"].values
+                         else:
+                             return year_weightings.iloc[:, 0].values
+                     else:
+                         return year_weightings.values
+
+             return None
+
+         except Exception as e:
+             return None
+
+     def _count_year_snapshots(self, snapshots: "pd.Index", year: int) -> int:
+         """Count snapshots for a specific year"""
+         try:
+             # Handle MultiIndex case
+             if hasattr(snapshots, "levels"):
+                 period_values = snapshots.get_level_values(0)
+                 year_mask = period_values == year
+                 return year_mask.sum()
+
+             # Handle DatetimeIndex case
+             elif hasattr(snapshots, "year"):
+                 year_mask = snapshots.year == year
+                 return year_mask.sum()
+
+             # Fallback
+             return 0
+
+         except Exception as e:
+             return 0
+
+     def _calculate_year_carrier_statistics(
+         self, network: "pypsa.Network", year: int
+     ) -> Dict[str, Any]:
+         """Calculate carrier-specific statistics for a specific year"""
+         # Note: This is a simplified implementation that doesn't have database access
+         # The proper implementation should be done in the storage module where we have conn
+         # For now, return empty dictionaries - the storage module will handle this properly
+         return {
+             "dispatch_by_carrier": {},
+             "capacity_by_carrier": {},
+             "emissions_by_carrier": {},
+             "capital_cost_by_carrier": {},
+             "operational_cost_by_carrier": {},
+             "total_system_cost_by_carrier": {},
+         }
+
+     def _get_generator_carrier_name(self, generator_name: str) -> Optional[str]:
+         """Get carrier name for a generator - simplified implementation"""
+         # This is a simplified approach - in practice, this should query the database
+         # or use the component type mapping from the network
+
+         # Try to extract carrier from generator name patterns
+         gen_lower = generator_name.lower()
+
+         if "coal" in gen_lower:
+             return "coal"
+         elif "gas" in gen_lower or "ccgt" in gen_lower or "ocgt" in gen_lower:
+             return "gas"
+         elif "nuclear" in gen_lower:
+             return "nuclear"
+         elif "solar" in gen_lower or "pv" in gen_lower:
+             return "solar"
+         elif "wind" in gen_lower:
+             return "wind"
+         elif "hydro" in gen_lower:
+             return "hydro"
+         elif "biomass" in gen_lower:
+             return "biomass"
+         elif "battery" in gen_lower:
+             return "battery"
+         elif "unmet" in gen_lower:
+             return "Unmet Load"
+         else:
+             # Default to generator name if no pattern matches
+             return generator_name
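Putting the pieces together, a minimal driver for this module might look like the sketch below (illustrative only; it assumes a PyPSA network whose snapshots and investment_periods are already configured, since solve_network always runs with multi_investment_periods=True):

import pypsa
from pyconvexity.solvers.pypsa.solver import NetworkSolver

network = pypsa.Network()
# ... add buses, generators, and loads, and set multi-period snapshots here ...

solver = NetworkSolver(verbose=True)
result = solver.solve_network(
    network,
    solver_name="custom",
    custom_solver_config={
        "solver": "gurobi",
        "solver_options": {"Method": 2, "Crossover": 0},
    },
)
if result["success"]:
    print(result["objective_value"])
    print(result["network_statistics"]["core_summary"])
else:
    print("Solve failed:", result.get("error"))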