pyconvexity 0.1.2__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release: this version of pyconvexity might be problematic.

Files changed (33)
  1. pyconvexity/__init__.py +30 -6
  2. pyconvexity/data/README.md +101 -0
  3. pyconvexity/data/__init__.py +18 -0
  4. pyconvexity/data/__pycache__/__init__.cpython-313.pyc +0 -0
  5. pyconvexity/data/loaders/__init__.py +3 -0
  6. pyconvexity/data/loaders/__pycache__/__init__.cpython-313.pyc +0 -0
  7. pyconvexity/data/loaders/__pycache__/cache.cpython-313.pyc +0 -0
  8. pyconvexity/data/loaders/cache.py +212 -0
  9. pyconvexity/data/sources/__init__.py +5 -0
  10. pyconvexity/data/sources/__pycache__/__init__.cpython-313.pyc +0 -0
  11. pyconvexity/data/sources/__pycache__/gem.cpython-313.pyc +0 -0
  12. pyconvexity/data/sources/gem.py +412 -0
  13. pyconvexity/io/__init__.py +32 -0
  14. pyconvexity/io/excel_exporter.py +991 -0
  15. pyconvexity/io/excel_importer.py +1112 -0
  16. pyconvexity/io/netcdf_exporter.py +192 -0
  17. pyconvexity/io/netcdf_importer.py +599 -0
  18. pyconvexity/models/__init__.py +7 -0
  19. pyconvexity/models/components.py +3 -0
  20. pyconvexity/models/scenarios.py +177 -0
  21. pyconvexity/solvers/__init__.py +29 -0
  22. pyconvexity/solvers/pypsa/__init__.py +24 -0
  23. pyconvexity/solvers/pypsa/api.py +398 -0
  24. pyconvexity/solvers/pypsa/batch_loader.py +311 -0
  25. pyconvexity/solvers/pypsa/builder.py +656 -0
  26. pyconvexity/solvers/pypsa/constraints.py +321 -0
  27. pyconvexity/solvers/pypsa/solver.py +1255 -0
  28. pyconvexity/solvers/pypsa/storage.py +2207 -0
  29. {pyconvexity-0.1.2.dist-info → pyconvexity-0.1.3.dist-info}/METADATA +5 -2
  30. pyconvexity-0.1.3.dist-info/RECORD +45 -0
  31. pyconvexity-0.1.2.dist-info/RECORD +0 -20
  32. {pyconvexity-0.1.2.dist-info → pyconvexity-0.1.3.dist-info}/WHEEL +0 -0
  33. {pyconvexity-0.1.2.dist-info → pyconvexity-0.1.3.dist-info}/top_level.txt +0 -0
pyconvexity/solvers/pypsa/solver.py (new file)
@@ -0,0 +1,1255 @@
+ """
+ Solving functionality for PyPSA networks.
+
+ Handles the actual optimization solving with various solvers and configurations.
+ """
+
+ import logging
+ import time
+ import uuid
+ import pandas as pd
+ import numpy as np
+ from typing import Dict, Any, Optional
+
+ logger = logging.getLogger(__name__)
+
+
+ class NetworkSolver:
+     """
+     Handles solving PyPSA networks with various solvers and configurations.
+
+     This class encapsulates the solving logic, including solver configuration,
+     multi-period optimization setup, and result extraction.
+     """
+
+     def __init__(self):
+         # Import PyPSA with error handling
+         try:
+             import pypsa
+             self.pypsa = pypsa
+         except ImportError as e:
+             raise ImportError(
+                 "PyPSA is not installed or could not be imported. "
+                 "Please ensure it is installed correctly in the environment."
+             ) from e
+
+     def solve_network(
+         self,
+         network: 'pypsa.Network',
+         solver_name: str = "highs",
+         solver_options: Optional[Dict[str, Any]] = None,
+         discount_rate: Optional[float] = None,
+         job_id: Optional[str] = None,
+         conn=None,
+         network_id: Optional[int] = None,
+         scenario_id: Optional[int] = None,
+         constraint_applicator=None
+     ) -> Dict[str, Any]:
+         """
+         Solve a PyPSA network and return results.
+
+         Args:
+             network: PyPSA Network object to solve
+             solver_name: Solver to use (default: "highs")
+             solver_options: Optional solver-specific options
+             discount_rate: Optional discount rate for multi-period optimization
+             job_id: Optional job ID for tracking
+             conn: Optional database connection used to read network metadata
+             network_id: Optional database ID of the network
+             scenario_id: Optional database ID of the scenario for constraint lookup
+             constraint_applicator: Optional helper that applies optimization-time constraints
+
+         Returns:
+             Dictionary with solve results and metadata
+
+         Raises:
+             ImportError: If PyPSA is not available
+             Exception: If solving fails
+         """
+         start_time = time.time()
+         run_id = str(uuid.uuid4())
+
+         logger.info(f"Starting network solve with {solver_name}")
+
+         try:
+             # Get solver configuration
+             actual_solver_name, solver_config = self._get_solver_config(solver_name, solver_options)
+
+             # Always use multi-period mode for consistency
+             # Extract years from network snapshots
+             if hasattr(network, '_available_years') and network._available_years:
+                 years = network._available_years
+             elif hasattr(network.snapshots, 'year'):
+                 years = sorted(network.snapshots.year.unique())
+             else:
+                 # If no year info, use a single default year
+                 years = [2020]  # Default single year
+
+             # Configure for multi-period optimization (works for a single year too)
+             effective_discount_rate = discount_rate if discount_rate is not None else 0.05  # Default 5%
+             logger.info(f"Configuring multi-period optimization with discount rate {effective_discount_rate}")
+             network = self._configure_multi_period_optimization(network, years, effective_discount_rate)
+
+             # CRITICAL: Set snapshot weightings AFTER multi-period setup
+             # PyPSA's multi-period setup can reset snapshot weightings to 1.0
+             if conn and network_id:
+                 self._set_snapshot_weightings_after_multiperiod(conn, network_id, network)
+
+             # Prepare optimization constraints (extra_functionality)
+             extra_functionality = None
+             if conn and network_id and constraint_applicator:
+                 optimization_constraints = constraint_applicator.get_optimization_constraints(conn, network_id, scenario_id)
+                 if optimization_constraints:
+                     logger.info(f"Applying {len(optimization_constraints)} optimization-time constraints")
+                     extra_functionality = self._create_extra_functionality(optimization_constraints, constraint_applicator)
+
+             # Solver diagnostics (simplified version of old code)
+             logger.info("=== PYPSA SOLVER DIAGNOSTICS ===")
+             logger.info(f"Requested solver: {solver_name}")
+             logger.info(f"Actual solver: {actual_solver_name}")
+             if solver_config:
+                 logger.info(f"Solver options: {solver_config}")
+             logger.info(f"Multi-period optimization: {self._is_multi_period_network(network)}")
+             logger.info(f"Investment periods: {getattr(network, 'investment_periods', 'None')}")
+             logger.info("=== END PYPSA SOLVER DIAGNOSTICS ===")
+
+             # Solve the network
+             logger.info(f"Solving network with {actual_solver_name}")
+
+             if solver_config:
+                 result = self._solve_with_config(network, actual_solver_name, solver_config, job_id, extra_functionality)
+             else:
+                 result = self._solve_standard(network, actual_solver_name, job_id, extra_functionality)
+
+             solve_time = time.time() - start_time
+
+             # Post-solve debug logging (matches old code)
+             objective_value = getattr(network, 'objective', None)
+             if objective_value is not None:
+                 logger.info("[DEBUG] POST-SOLVE snapshot_weightings structure:")
+                 if hasattr(network, 'snapshot_weightings'):
+                     logger.info(f"[DEBUG] Type: {type(network.snapshot_weightings)}")
+                     logger.info(f"[DEBUG] Columns: {list(network.snapshot_weightings.columns)}")
+                     logger.info(f"[DEBUG] Shape: {network.snapshot_weightings.shape}")
+                     logger.info(f"[DEBUG] Unique values in objective column: {network.snapshot_weightings['objective'].unique()}")
+                     logger.info(f"[DEBUG] Sum of objective column: {network.snapshot_weightings['objective'].sum()}")
+
+                 if hasattr(network, 'investment_period_weightings'):
+                     logger.info("[DEBUG] investment_period_weightings exists:")
+                     logger.info(f"[DEBUG] Type: {type(network.investment_period_weightings)}")
+                     logger.info(f"[DEBUG] Content:\n{network.investment_period_weightings}")
+
+             # Extract solve results with comprehensive statistics
+             solve_result = self._extract_solve_results(network, result, solve_time, actual_solver_name, run_id)
+
+             # Calculate comprehensive network statistics (all years combined)
+             if solve_result.get('success'):
+                 logger.info("Calculating comprehensive network statistics...")
+                 network_statistics = self._calculate_comprehensive_network_statistics(network, solve_time, actual_solver_name)
+                 solve_result['network_statistics'] = network_statistics
+
+                 # Calculate year-based statistics for capacity expansion analysis
+                 logger.info("Calculating year-based statistics...")
+                 year_statistics = self._calculate_statistics_by_year(network, solve_time, actual_solver_name)
+                 solve_result['year_statistics'] = year_statistics
+                 solve_result['year_statistics_available'] = len(year_statistics) > 0
+
+             logger.info(f"Solve completed in {solve_time:.2f} seconds with status: {solve_result['status']}")
+             logger.info(f"PyPSA result object: {result}")
+             logger.info(f"PyPSA result status: {getattr(result, 'status', 'no status attr')}")
+             logger.info(f"Network objective: {getattr(network, 'objective', 'no objective')}")
+             logger.info(f"Solve result success: {solve_result.get('success')}")
+
+             return solve_result
+
+         except Exception as e:
+             solve_time = time.time() - start_time
+             logger.error(f"Solve failed after {solve_time:.2f} seconds: {e}")
+
+             return {
+                 "success": False,
+                 "status": "failed",
+                 "error": str(e),
+                 "solve_time": solve_time,
+                 "solver_name": actual_solver_name if 'actual_solver_name' in locals() else solver_name,
+                 "run_id": run_id,
+                 "objective_value": None
+             }
+
+     def _get_solver_config(self, solver_name: str, solver_options: Optional[Dict[str, Any]] = None) -> tuple[str, Optional[Dict[str, Any]]]:
+         """
+         Get the actual solver name and options for special solver configurations.
+
+         Args:
+             solver_name: The solver name (e.g., 'gurobi (barrier)', 'highs')
+             solver_options: Optional additional solver options
+
+         Returns:
+             Tuple of (actual_solver_name, solver_options_dict)
+         """
+         # Handle "default" solver
+         if solver_name == 'default':
+             logger.warning("Solver name 'default' received - falling back to 'highs'.")
+             return 'highs', solver_options
+
+         # Handle special Gurobi configurations
+         if solver_name == 'gurobi (barrier)':
+             gurobi_barrier_options = {
+                 'solver_options': {
+                     'Method': 2,  # Barrier
+                     'Crossover': 0,  # Skip crossover
+                     'MIPGap': 0.05,  # 5% gap
+                     'Threads': 4,  # Limit to 4 threads
+                     'Presolve': 2,  # Aggressive presolve
+                     'ConcurrentMIP': 1,  # Parallel root strategies
+                     'BarConvTol': 1e-4,  # Relaxed barrier convergence
+                     'FeasibilityTol': 1e-5,
+                     'OptimalityTol': 1e-5,
+                     'NumericFocus': 1,  # Improve stability
+                     'PreSparsify': 1,
+                 }
+             }
+             # Merge with any additional options
+             if solver_options:
+                 gurobi_barrier_options.update(solver_options)
+             return 'gurobi', gurobi_barrier_options
+
+         elif solver_name == 'gurobi (barrier homogeneous)':
+             gurobi_barrier_homogeneous_options = {
+                 'solver_options': {
+                     'Method': 2,  # Barrier
+                     'Crossover': 0,  # Skip crossover
+                     'MIPGap': 0.05,
+                     'Threads': 4,
+                     'Presolve': 2,
+                     'ConcurrentMIP': 1,
+                     'BarConvTol': 1e-4,
+                     'FeasibilityTol': 1e-5,
+                     'OptimalityTol': 1e-5,
+                     'NumericFocus': 1,
+                     'PreSparsify': 1,
+                     'BarHomogeneous': 1,  # Enable homogeneous barrier algorithm
+                 }
+             }
+             if solver_options:
+                 gurobi_barrier_homogeneous_options.update(solver_options)
+             return 'gurobi', gurobi_barrier_homogeneous_options
+
+         elif solver_name == 'gurobi (barrier+crossover balanced)':
+             gurobi_options_balanced = {
+                 'solver_options': {
+                     'Method': 2,
+                     'Crossover': 1,  # Dual crossover
+                     'MIPGap': 0.01,
+                     'Threads': 4,
+                     'Presolve': 2,
+                     'Heuristics': 0.1,
+                     'Cuts': 2,
+                     'ConcurrentMIP': 1,
+                     'BarConvTol': 1e-6,
+                     'FeasibilityTol': 1e-6,
+                     'OptimalityTol': 1e-6,
+                     'NumericFocus': 1,
+                     'PreSparsify': 1,
+                 }
+             }
+             if solver_options:
+                 gurobi_options_balanced.update(solver_options)
+             logger.info("Using Gurobi Barrier+Dual Crossover Balanced configuration")
+             return 'gurobi', gurobi_options_balanced
+
+         elif solver_name == 'gurobi (dual simplex)':
+             gurobi_dual_options = {
+                 'solver_options': {
+                     'Method': 1,  # Dual simplex method
+                     'Threads': 0,  # Use all available cores
+                     'Presolve': 2,  # Aggressive presolve
+                 }
+             }
+             if solver_options:
+                 gurobi_dual_options.update(solver_options)
+             return 'gurobi', gurobi_dual_options
+
+         # Check if this is a known valid solver name
+         elif solver_name in ['highs', 'gurobi', 'cplex', 'glpk', 'cbc', 'scip']:
+             return solver_name, solver_options
+
+         else:
+             # Unknown solver name - log warning and fall back to highs
+             logger.warning(f"Unknown solver name '{solver_name}' - falling back to 'highs'")
+             return 'highs', solver_options
+
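The branches above resolve a friendly solver label into a concrete solver name plus an options payload. A minimal sketch of the resulting tuples, assuming a NetworkSolver instance named solver:

    solver = NetworkSolver()

    name, config = solver._get_solver_config('gurobi (dual simplex)')
    # name == 'gurobi'
    # config == {'solver_options': {'Method': 1, 'Threads': 0, 'Presolve': 2}}

    name, config = solver._get_solver_config('default')
    # name == 'highs', config is None (a warning is logged)
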
+     def _solve_with_config(self, network: 'pypsa.Network', solver_name: str, solver_config: Dict[str, Any], job_id: Optional[str], extra_functionality=None) -> Any:
+         """Solve network with specific solver configuration."""
+         # Check if multi-period optimization is needed
+         is_multi_period = self._is_multi_period_network(network)
+
+         # Add extra_functionality to solver config if provided
+         if extra_functionality:
+             solver_config = solver_config.copy()  # Don't modify original
+             solver_config['extra_functionality'] = extra_functionality
+
+         if is_multi_period:
+             return network.optimize(solver_name=solver_name, multi_investment_periods=True, **solver_config)
+         else:
+             return network.optimize(solver_name=solver_name, **solver_config)
+
+     def _solve_standard(self, network: 'pypsa.Network', solver_name: str, job_id: Optional[str], extra_functionality=None) -> Any:
+         """Solve network with standard configuration."""
+         # Check if multi-period optimization is needed
+         is_multi_period = self._is_multi_period_network(network)
+
+         if extra_functionality:
+             if is_multi_period:
+                 return network.optimize(solver_name=solver_name, multi_investment_periods=True, extra_functionality=extra_functionality)
+             else:
+                 return network.optimize(solver_name=solver_name, extra_functionality=extra_functionality)
+         else:
+             if is_multi_period:
+                 return network.optimize(solver_name=solver_name, multi_investment_periods=True)
+             else:
+                 return network.optimize(solver_name=solver_name)
+
+     def _is_multi_period_network(self, network: 'pypsa.Network') -> bool:
+         """
+         Determine if the network requires multi-period optimization.
+
+         Multi-period optimization is needed when:
+         1. Network has investment_periods attribute with multiple periods
+         2. Network snapshots are MultiIndex with period/timestep structure
+         3. Network has generators with build_year attributes
+
+         Args:
+             network: PyPSA Network object
+
+         Returns:
+             True if multi-period optimization is needed, False otherwise
+         """
+         try:
+             # Check if network has investment_periods
+             if hasattr(network, 'investment_periods') and network.investment_periods is not None:
+                 periods = list(network.investment_periods)
+                 if len(periods) > 1:
+                     return True
+                 elif len(periods) == 1:
+                     # Even with single period, check if we have build_year constraints
+                     if hasattr(network, 'generators') and not network.generators.empty:
+                         if 'build_year' in network.generators.columns:
+                             build_year_gens = network.generators[network.generators['build_year'].notna()]
+                             if not build_year_gens.empty:
+                                 return True
+
+             # Check if snapshots are MultiIndex (period, timestep structure)
+             if hasattr(network, 'snapshots') and hasattr(network.snapshots, 'names'):
+                 if network.snapshots.names and len(network.snapshots.names) >= 2:
+                     if network.snapshots.names[0] == 'period':
+                         return True
+
+             # Check if we have generators with build_year (fallback check)
+             if hasattr(network, 'generators') and not network.generators.empty:
+                 if 'build_year' in network.generators.columns:
+                     build_year_gens = network.generators[network.generators['build_year'].notna()]
+                     if not build_year_gens.empty:
+                         # If we have build_year but no proper multi-period setup, we should still try multi-period
+                         return True
+
+             return False
+
+         except Exception as e:
+             logger.error(f"Error checking multi-period status: {e}")
+             return False
+
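The MultiIndex check above keys off a first level named 'period'. A small pandas-only sketch of the snapshot shape it recognizes (illustrative years and timesteps):

    import pandas as pd

    timesteps = pd.date_range('2030-01-01', periods=4, freq='h')
    snapshots = pd.MultiIndex.from_product([[2030, 2040], timesteps],
                                           names=['period', 'timestep'])
    # snapshots.names[0] == 'period', so _is_multi_period_network(...) returns True
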
+     def _create_extra_functionality(self, optimization_constraints: list, constraint_applicator) -> callable:
+         """
+         Create extra_functionality function for optimization-time constraints.
+
+         This matches the old PyPSA solver's approach to applying constraints during optimization.
+
+         Args:
+             optimization_constraints: List of optimization constraint dictionaries
+             constraint_applicator: ConstraintApplicator instance
+
+         Returns:
+             Function that can be passed to network.optimize(extra_functionality=...)
+         """
+         def extra_functionality(network, snapshots):
+             """Apply optimization constraints during solve - matches old code structure"""
+             try:
+                 logger.info(f"Applying {len(optimization_constraints)} optimization constraints during solve")
+
+                 # Apply each constraint in priority order
+                 sorted_constraints = sorted(optimization_constraints, key=lambda x: x.get('priority', 0))
+
+                 for constraint in sorted_constraints:
+                     try:
+                         constraint_applicator.apply_optimization_constraint(network, snapshots, constraint)
+                     except Exception as e:
+                         logger.error(f"Failed to apply optimization constraint {constraint.get('name', 'unknown')}: {e}")
+                         continue
+
+                 logger.info("Optimization constraints applied successfully")
+
+             except Exception as e:
+                 logger.error(f"Failed to apply optimization constraints: {e}")
+                 # Don't re-raise - let optimization continue
+
+         return extra_functionality
+
+     def _set_snapshot_weightings_after_multiperiod(self, conn, network_id: int, network: 'pypsa.Network'):
+         """Set snapshot weightings AFTER multi-period setup - matches old code approach."""
+         try:
+             from pyconvexity.models import get_network_time_periods, get_network_info
+
+             time_periods = get_network_time_periods(conn, network_id)
+             if time_periods and len(network.snapshots) > 0:
+                 logger.info(f"Setting snapshot weightings AFTER multi-period setup for {len(time_periods)} time periods")
+
+                 # Get network info to determine time interval (stored in networks table, not network_config)
+                 network_info = get_network_info(conn, network_id)
+                 time_interval = network_info.get('time_interval', '1H')
+                 weight = self._parse_time_interval(time_interval)
+
+                 if weight is None:
+                     weight = 1.0
+                     logger.warning(f"Could not parse time interval '{time_interval}', using default weight of 1.0")
+
+                 logger.info(f"Parsed time interval '{time_interval}' -> weight = {weight}")
+
+                 # Create weightings array - all snapshots get the same weight for this time resolution
+                 weightings = [weight] * len(time_periods)
+
+                 if len(weightings) == len(network.snapshots):
+                     # Set all three columns like the old code - critical for proper objective calculation
+                     network.snapshot_weightings.loc[:, 'objective'] = weightings
+                     network.snapshot_weightings.loc[:, 'generators'] = weightings
+                     network.snapshot_weightings.loc[:, 'stores'] = weightings
+                     logger.info("Set snapshot weightings AFTER multi-period setup: objective, generators, stores columns")
+
+                     # Debug logging like old code
+                     logger.info(f"Snapshot weightings shape: {network.snapshot_weightings.shape}")
+                     logger.info(f"Unique values in objective column: {network.snapshot_weightings['objective'].unique()}")
+                     logger.info(f"Sum of objective column: {network.snapshot_weightings['objective'].sum()}")
+                     logger.info(f"Weight per snapshot: {weight} hours")
+                 else:
+                     logger.warning(f"Mismatch between weightings ({len(weightings)}) and snapshots ({len(network.snapshots)})")
+         except Exception as e:
+             logger.warning(f"Failed to set snapshot weightings after multi-period setup: {e}")
+             logger.exception("Full traceback:")
+
+     def _parse_time_interval(self, time_interval: str) -> Optional[float]:
+         """Parse time interval string to hours - handles multiple formats."""
+         if not time_interval:
+             return None
+
+         try:
+             # Clean up the string
+             interval = time_interval.strip()
+
+             # Handle ISO 8601 duration format (PT3H, PT30M, etc.)
+             if interval.startswith('PT') and interval.endswith('H'):
+                 # Extract hours (e.g., 'PT3H' -> 3.0)
+                 hours_str = interval[2:-1]  # Remove 'PT' and 'H'
+                 return float(hours_str)
+             elif interval.startswith('PT') and interval.endswith('M'):
+                 # Extract minutes (e.g., 'PT30M' -> 0.5)
+                 minutes_str = interval[2:-1]  # Remove 'PT' and 'M'
+                 return float(minutes_str) / 60.0
+             elif interval.startswith('PT') and interval.endswith('S'):
+                 # Extract seconds (e.g., 'PT3600S' -> 1.0)
+                 seconds_str = interval[2:-1]  # Remove 'PT' and 'S'
+                 return float(seconds_str) / 3600.0
+
+             # Handle simple frequency strings (3H, 2D, etc.)
+             elif interval.endswith('H') or interval.endswith('h'):
+                 hours_str = interval[:-1]
+                 return float(hours_str) if hours_str else 1.0
+             elif interval.endswith('D') or interval.endswith('d'):
+                 days_str = interval[:-1]
+                 return float(days_str) * 24 if days_str else 24.0
+             elif interval.endswith('M') or interval.endswith('m'):
+                 minutes_str = interval[:-1]
+                 return float(minutes_str) / 60.0 if minutes_str else 1.0 / 60.0
+             elif interval.endswith('S') or interval.endswith('s'):
+                 seconds_str = interval[:-1]
+                 return float(seconds_str) / 3600.0 if seconds_str else 1.0 / 3600.0
+
+             # Try to parse as plain number (assume hours)
+             else:
+                 return float(interval)
+
+         except (ValueError, TypeError) as e:
+             logger.warning(f"Could not parse time interval '{time_interval}': {e}")
+             return None
+
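Branch order matters here: ISO 8601 durations ('PT30M') are handled before the bare suffix forms, so 'M' means minutes in both spellings. Representative inputs and the hours they map to, assuming the same solver instance as above:

    solver._parse_time_interval('PT3H')   # -> 3.0
    solver._parse_time_interval('PT30M')  # -> 0.5
    solver._parse_time_interval('3H')     # -> 3.0
    solver._parse_time_interval('2D')     # -> 48.0
    solver._parse_time_interval('30m')    # -> 0.5
    solver._parse_time_interval('0.5')    # -> 0.5 (plain numbers are read as hours)
    solver._parse_time_interval('bogus')  # -> None (parse failure is logged)
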
+     def _configure_multi_period_optimization(self, network: 'pypsa.Network', years: list, discount_rate: float) -> 'pypsa.Network':
+         """
+         Configure network for multi-period optimization (works for single or multiple years).
+
+         Args:
+             network: PyPSA Network object
+             years: List of years in the network
+             discount_rate: Discount rate for investment calculations
+
+         Returns:
+             Configured network
+         """
+         try:
+             import pandas as pd
+
+             logger.info(f"Configuring multi-period optimization for years: {years}")
+             logger.info(f"Current snapshots: {len(network.snapshots)} time steps")
+
+             # Handle case where snapshots don't have year info but years were extracted manually
+             if not hasattr(network.snapshots, 'year'):
+                 if len(years) > 0:
+                     # Use the manually extracted years from timestamps
+                     # Create MultiIndex snapshots by dividing existing snapshots among the years
+                     snapshots_per_year = len(network.snapshots) // len(years)
+                     multi_snapshots = []
+
+                     for i, year in enumerate(years):
+                         start_idx = i * snapshots_per_year
+                         end_idx = (i + 1) * snapshots_per_year if i < len(years) - 1 else len(network.snapshots)
+                         year_snapshots = network.snapshots[start_idx:end_idx]
+                         for snapshot in year_snapshots:
+                             multi_snapshots.append((year, snapshot))
+
+                     logger.info(f"Created {len(multi_snapshots)} multi-period snapshots from {len(network.snapshots)} original snapshots")
+
+                 else:
+                     # Only use 2020 fallback if no years were extracted at all (should be rare)
+                     single_year = 2020
+                     multi_snapshots = [(single_year, snapshot) for snapshot in network.snapshots]
+                     years = [single_year]
+                     logger.warning(f"No years provided, using fallback year {single_year}")
+             else:
+                 # Create MultiIndex snapshots from existing year-based snapshots
+                 multi_snapshots = []
+                 for year in years:
+                     year_snapshots = network.snapshots[network.snapshots.year == year]
+                     for snapshot in year_snapshots:
+                         multi_snapshots.append((year, snapshot))
+
+                 logger.info(f"Created {len(multi_snapshots)} multi-period snapshots from year-based snapshots")
+
+             # Set MultiIndex snapshots and investment periods
+             network.snapshots = pd.MultiIndex.from_tuples(multi_snapshots, names=['period', 'timestep'])
+             network.investment_periods = years
+             print(network.investment_periods)  # Match old code debug output
+
+             logger.info(f"Set investment_periods: {network.investment_periods}")
+             logger.info(f"MultiIndex snapshots created with levels: {network.snapshots.names}")
+
+             # Calculate investment period weightings with discount rate
+             self._calculate_investment_weightings(network, discount_rate)
+
+             # Configure build year constraints for multi-period optimization
+             self._configure_build_year_constraints(network, years)
+
+             logger.info(f"Successfully configured multi-period optimization for {len(years)} investment periods")
+
+         except Exception as e:
+             logger.error(f"Failed to configure multi-period optimization: {e}")
+             logger.exception("Full traceback:")
+             # Don't re-raise - let the solve continue with original configuration
+
+         return network
+
+     def _calculate_investment_weightings(self, network: 'pypsa.Network', discount_rate: float) -> None:
+         """
+         Calculate investment period weightings using discount rate - matching old PyPSA solver exactly.
+
+         Args:
+             network: PyPSA Network object
+             discount_rate: Discount rate for NPV calculations
+         """
+         try:
+             import pandas as pd
+
+             if not hasattr(network, 'investment_periods') or len(network.investment_periods) == 0:
+                 return
+
+             years = network.investment_periods
+             # Convert pandas Index to list for easier handling
+             years_list = years.tolist() if hasattr(years, 'tolist') else list(years)
+
+             logger.info(f"Calculating investment weightings for periods: {years_list} with discount rate: {discount_rate}")
+
+             # For single year, use simple weighting of 1.0
+             if len(years_list) == 1:
+                 # Single year case
+                 network.investment_period_weightings = pd.DataFrame({
+                     'objective': pd.Series({years_list[0]: 1.0}),
+                     'years': pd.Series({years_list[0]: 1})
+                 })
+                 logger.info(f"Set single-year investment period weightings for year {years_list[0]}")
+             else:
+                 # Multi-year case - EXACTLY match old code logic
+                 # Get unique years from the network snapshots to determine period lengths
+                 if hasattr(network.snapshots, 'year'):
+                     snapshot_years = sorted(network.snapshots.year.unique())
+                 elif hasattr(network.snapshots, 'get_level_values'):
+                     # MultiIndex case - get years from 'period' level
+                     snapshot_years = sorted(network.snapshots.get_level_values('period').unique())
+                 else:
+                     # Fallback: use investment periods as years
+                     snapshot_years = years_list
+
+                 logger.info(f"Snapshot years found: {snapshot_years}")
+
+                 # Calculate years per period - EXACTLY matching old code
+                 years_diff = []
+                 for i, year in enumerate(years_list):
+                     if i < len(years_list) - 1:
+                         # Years between this period and the next
+                         next_year = years_list[i + 1]
+                         period_years = next_year - year
+                     else:
+                         # For the last period, calculate based on snapshot coverage
+                         if snapshot_years:
+                             # Find the last snapshot year that's >= current period year
+                             last_snapshot_year = max([y for y in snapshot_years if y >= year])
+                             period_years = last_snapshot_year - year + 1
+                         else:
+                             # Fallback: assume same length as previous period or 1
+                             if len(years_diff) > 0:
+                                 period_years = years_diff[-1]  # Same as previous period
+                             else:
+                                 period_years = 1
+
+                     years_diff.append(period_years)
+                     logger.info(f"Period {year}: {period_years} years")
+
+                 # Create weightings DataFrame with years column
+                 weightings_df = pd.DataFrame({
+                     'years': pd.Series(years_diff, index=years_list)
+                 })
+
+                 # Calculate objective weightings with discount rate - EXACTLY matching old code
+                 r = discount_rate
+                 T = 0  # Cumulative time tracker
+
+                 logger.info(f"Calculating discount factors with rate {r}:")
+                 for period, nyears in weightings_df.years.items():
+                     # Calculate discount factors for each year in this period
+                     discounts = [(1 / (1 + r) ** t) for t in range(T, T + nyears)]
+                     period_weighting = sum(discounts)
+                     weightings_df.at[period, "objective"] = period_weighting
+
+                     logger.info(f"  Period {period}: years {T} to {T + nyears - 1}, discounts={[f'{d:.4f}' for d in discounts]}, sum={period_weighting:.4f}")
+                     T += nyears  # Update cumulative time
+
+                 network.investment_period_weightings = weightings_df
+                 logger.info("Final investment period weightings:")
+                 logger.info(f"  Years: {weightings_df['years'].to_dict()}")
+                 logger.info(f"  Objective: {weightings_df['objective'].to_dict()}")
+
+         except Exception as e:
+             logger.error(f"Failed to calculate investment weightings: {e}")
+             logger.exception("Full traceback:")
+
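Concretely, with periods [2030, 2040], ten snapshot years per period, and r = 0.05, each period's objective weighting is the sum of the per-year discount factors 1/(1+r)^t over the years it covers (illustrative numbers, not from the package):

    r = 0.05
    # period 2030 covers t = 0..9, period 2040 covers t = 10..19
    w_2030 = sum(1 / (1 + r) ** t for t in range(0, 10))   # ~8.108
    w_2040 = sum(1 / (1 + r) ** t for t in range(10, 20))  # ~4.978
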
+     def _configure_build_year_constraints(self, network: 'pypsa.Network', years: list) -> None:
+         """
+         Configure build year constraints for multi-period optimization.
+
+         In PyPSA multi-period optimization, generators should only be available for investment
+         starting from their build year. This method ensures proper constraint setup.
+
+         Args:
+             network: PyPSA Network object
+             years: List of investment periods (years)
+         """
+         try:
+             import pandas as pd
+
+             logger.info("Configuring build year constraints for multi-period optimization")
+
+             # Check if we have generators with build_year attributes
+             if not hasattr(network, 'generators') or network.generators.empty:
+                 logger.warning("No generators found, skipping build year constraints")
+                 return
+
+             if 'build_year' not in network.generators.columns:
+                 logger.warning("No build_year column found in generators, skipping build year constraints")
+                 return
+
+             # Get generators with build year information
+             generators_with_build_year = network.generators[network.generators['build_year'].notna()]
+
+             if generators_with_build_year.empty:
+                 logger.warning("No generators have build_year values, skipping build year constraints")
+                 return
+
+             logger.info(f"Applying build year constraints to {len(generators_with_build_year)} generators")
+
+             # Check if generators have proper extendable capacity settings
+             if 'p_nom_extendable' in network.generators.columns:
+                 extendable_generators = generators_with_build_year[generators_with_build_year['p_nom_extendable'] == True]
+
+                 if extendable_generators.empty:
+                     logger.warning("No generators are marked as extendable (p_nom_extendable=True). Build year constraints only apply to extendable generators.")
+                     return
+
+                 logger.info(f"Found {len(extendable_generators)} extendable generators with build years")
+             else:
+                 logger.warning("No p_nom_extendable column found - cannot determine which generators are extendable")
+                 return
+
+             # Verify that build years align with investment periods
+             build_years = set(generators_with_build_year['build_year'].astype(int))
+             investment_years = set(years)
+
+             unmatched_build_years = build_years - investment_years
+             if unmatched_build_years:
+                 logger.warning(f"Some generators have build years not in investment periods: {sorted(unmatched_build_years)}")
+
+             matched_build_years = build_years & investment_years
+             logger.info(f"Generators with build years matching investment periods: {sorted(matched_build_years)}")
+
+             # Store build year information for potential custom constraint application
+             network._build_year_info = {
+                 'generators_with_build_year': generators_with_build_year.index.tolist(),
+                 'build_years': generators_with_build_year['build_year'].to_dict(),
+                 'investment_periods': years,
+                 'extendable_generators': extendable_generators.index.tolist() if 'extendable_generators' in locals() else []
+             }
+
+             logger.info("Build year constraint configuration completed")
+
+         except Exception as e:
+             logger.error(f"Failed to configure build year constraints: {e}")
+             logger.exception("Full traceback:")
+
+     def _extract_solve_results(self, network: 'pypsa.Network', result: Any, solve_time: float, solver_name: str, run_id: str) -> Dict[str, Any]:
+         """
+         Extract solve results from PyPSA network.
+
+         Args:
+             network: Solved PyPSA Network object
+             result: PyPSA solve result
+             solve_time: Time taken to solve
+             solver_name: Name of solver used
+             run_id: Unique run identifier
+
+         Returns:
+             Dictionary with solve results and metadata
+         """
+         try:
+             # Extract basic solve information
+             status = getattr(result, 'status', 'unknown')
+             objective_value = getattr(network, 'objective', None)
+
+             # Debug logging
+             logger.info(f"Raw PyPSA result attributes: {dir(result) if result else 'None'}")
+             if hasattr(result, 'termination_condition'):
+                 logger.info(f"Termination condition: {result.termination_condition}")
+             if hasattr(result, 'solver'):
+                 logger.info(f"Solver info: {result.solver}")
+
+             # Convert PyPSA result to dictionary format
+             result_dict = self._convert_pypsa_result_to_dict(result)
+
+             # Determine success based on multiple criteria
+             success = self._determine_solve_success(result, network, status, objective_value)
+
+             solve_result = {
+                 "success": success,
+                 "status": status,
+                 "solve_time": solve_time,
+                 "solver_name": solver_name,
+                 "run_id": run_id,
+                 "objective_value": objective_value,
+                 "pypsa_result": result_dict,
+                 "network_name": network.name,
+                 "num_buses": len(network.buses),
+                 "num_generators": len(network.generators),
+                 "num_loads": len(network.loads),
+                 "num_lines": len(network.lines),
+                 "num_links": len(network.links),
+                 "num_snapshots": len(network.snapshots)
+             }
+
+             # Add multi-period information if available
+             if hasattr(network, '_available_years') and network._available_years:
+                 solve_result["years"] = network._available_years
+                 solve_result["multi_period"] = len(network._available_years) > 1
+
+             return solve_result
+
+         except Exception as e:
+             logger.error(f"Failed to extract solve results: {e}")
+             return {
+                 "success": False,
+                 "status": "extraction_failed",
+                 "error": f"Failed to extract results: {e}",
+                 "solve_time": solve_time,
+                 "solver_name": solver_name,
+                 "run_id": run_id,
+                 "objective_value": None
+             }
+
+     def _determine_solve_success(self, result: Any, network: 'pypsa.Network', status: str, objective_value: Optional[float]) -> bool:
+         """
+         Determine if solve was successful based on multiple criteria.
+
+         PyPSA sometimes returns status='unknown' even for successful solves,
+         so we need to check multiple indicators.
+         """
+         try:
+             # Check explicit status first
+             if status in ['optimal', 'feasible']:
+                 logger.info(f"Success determined by status: {status}")
+                 return True
+
+             # Check termination condition
+             if hasattr(result, 'termination_condition'):
+                 term_condition = str(result.termination_condition).lower()
+                 if 'optimal' in term_condition:
+                     logger.info(f"Success determined by termination condition: {result.termination_condition}")
+                     return True
+
+             # Check if we have a valid objective value
+             if objective_value is not None and not (objective_value == 0 and status == 'unknown'):
+                 logger.info(f"Success determined by valid objective value: {objective_value}")
+                 return True
+
+             # Check solver-specific success indicators
+             if hasattr(result, 'solver'):
+                 solver_info = result.solver
+                 if hasattr(solver_info, 'termination_condition'):
+                     term_condition = str(solver_info.termination_condition).lower()
+                     if 'optimal' in term_condition:
+                         logger.info(f"Success determined by solver termination condition: {solver_info.termination_condition}")
+                         return True
+
+             logger.warning(f"Could not determine success: status={status}, objective={objective_value}, result_attrs={dir(result) if result else 'None'}")
+             return False
+
+         except Exception as e:
+             logger.error(f"Error determining solve success: {e}")
+             return False
+
+     def _convert_pypsa_result_to_dict(self, result) -> Dict[str, Any]:
+         """
+         Convert PyPSA result object to dictionary.
+
+         Args:
+             result: PyPSA solve result object
+
+         Returns:
+             Dictionary representation of the result
+         """
+         try:
+             if result is None:
+                 return {"status": "no_result"}
+
+             result_dict = {}
+
+             # Extract common attributes
+             for attr in ['status', 'success', 'termination_condition', 'solver']:
+                 if hasattr(result, attr):
+                     value = getattr(result, attr)
+                     # Convert to serializable format
+                     if hasattr(value, '__dict__'):
+                         result_dict[attr] = str(value)
+                     else:
+                         result_dict[attr] = value
+
+             # Handle solver-specific information
+             if hasattr(result, 'solver_results'):
+                 solver_results = getattr(result, 'solver_results')
+                 if hasattr(solver_results, '__dict__'):
+                     result_dict['solver_results'] = str(solver_results)
+                 else:
+                     result_dict['solver_results'] = solver_results
+
+             return result_dict
+
+         except Exception as e:
+             logger.warning(f"Failed to convert PyPSA result to dict: {e}")
+             return {"status": "conversion_failed", "error": str(e)}
+
+     def _calculate_comprehensive_network_statistics(self, network: 'pypsa.Network', solve_time: float, solver_name: str) -> Dict[str, Any]:
+         """Calculate comprehensive network statistics including PyPSA statistics and custom metrics"""
+         try:
+             # Initialize statistics structure
+             statistics = {
+                 "core_summary": {},
+                 "pypsa_statistics": {},
+                 "custom_statistics": {},
+                 "runtime_info": {},
+                 "solver_info": {}
+             }
+
+             # Core summary statistics
+             total_generation = 0
+             total_demand = 0
+             unserved_energy = 0
+
+             # Calculate generation statistics
+             if hasattr(network, 'generators_t') and hasattr(network.generators_t, 'p'):
+                 # Apply snapshot weightings to convert MW to MWh
+                 weightings = network.snapshot_weightings
+                 if isinstance(weightings, pd.DataFrame):
+                     if 'objective' in weightings.columns:
+                         weighting_values = weightings['objective'].values
+                     else:
+                         weighting_values = weightings.iloc[:, 0].values
+                 else:
+                     weighting_values = weightings.values
+
+                 total_generation = float((network.generators_t.p.values * weighting_values[:, None]).sum())
+
+                 # Calculate unserved energy from UNMET_LOAD generators
+                 if hasattr(network, 'generators') and hasattr(network, '_component_type_map'):
+                     unmet_load_gen_names = [name for name, comp_type in network._component_type_map.items()
+                                             if comp_type == 'UNMET_LOAD']
+
+                     for gen_name in unmet_load_gen_names:
+                         if gen_name in network.generators_t.p.columns:
+                             gen_output = float((network.generators_t.p[gen_name] * weighting_values).sum())
+                             unserved_energy += gen_output
+
+             # Calculate demand statistics
+             if hasattr(network, 'loads_t') and hasattr(network.loads_t, 'p'):
+                 weightings = network.snapshot_weightings
+                 if isinstance(weightings, pd.DataFrame):
+                     if 'objective' in weightings.columns:
+                         weighting_values = weightings['objective'].values
+                     else:
+                         weighting_values = weightings.iloc[:, 0].values
+                 else:
+                     weighting_values = weightings.values
+
+                 total_demand = float((network.loads_t.p.values * weighting_values[:, None]).sum())
+
+             statistics["core_summary"] = {
+                 "total_generation_mwh": total_generation,
+                 "total_demand_mwh": total_demand,
+                 "total_cost": float(network.objective) if hasattr(network, 'objective') else None,
+                 "load_factor": (total_demand / (total_generation + 1e-6)) if total_generation > 0 else 0,
+                 "unserved_energy_mwh": unserved_energy
+             }
+
+             # Calculate PyPSA statistics
+             try:
+                 pypsa_stats = network.statistics()
+                 if pypsa_stats is not None and not pypsa_stats.empty:
+                     statistics["pypsa_statistics"] = self._convert_pypsa_result_to_dict(pypsa_stats)
+                 else:
+                     statistics["pypsa_statistics"] = {}
+             except Exception as e:
+                 logger.error(f"Failed to calculate PyPSA statistics: {e}")
+                 statistics["pypsa_statistics"] = {}
+
+             # Custom statistics - calculate detailed breakdowns
+             total_cost = float(network.objective) if hasattr(network, 'objective') else 0.0
+             avg_price = (total_cost / (total_generation + 1e-6)) if total_generation > 0 else None
+             unmet_load_percentage = (unserved_energy / (total_demand + 1e-6)) * 100 if total_demand > 0 else 0
+
+             # Note: For solver statistics, we keep a simplified approach since this is just for logging
+             # The storage module will calculate proper totals from carrier statistics
+             statistics["custom_statistics"] = {
+                 "total_capital_cost": 0.0,  # Will be calculated properly in storage module
+                 "total_operational_cost": total_cost,  # PyPSA objective (includes both capital and operational, discounted)
+                 "total_currency_cost": total_cost,
+                 "total_emissions_tons_co2": 0.0,  # Will be calculated properly in storage module
+                 "average_price_per_mwh": avg_price,
+                 "unmet_load_percentage": unmet_load_percentage,
+                 "max_unmet_load_hour_mw": 0.0  # TODO: Calculate max hourly unmet load
+             }
+
+             # Runtime info
+             unmet_load_count = 0
+             if hasattr(network, '_component_type_map'):
+                 unmet_load_count = len([name for name, comp_type in network._component_type_map.items()
+                                         if comp_type == 'UNMET_LOAD'])
+
+             statistics["runtime_info"] = {
+                 "solve_time_seconds": solve_time,
+                 "component_count": (
+                     len(network.buses) + len(network.generators) + len(network.loads) +
+                     len(network.lines) + len(network.links)
+                 ) if hasattr(network, 'buses') else 0,
+                 "bus_count": len(network.buses) if hasattr(network, 'buses') else 0,
+                 "generator_count": len(network.generators) if hasattr(network, 'generators') else 0,
+                 "unmet_load_count": unmet_load_count,
+                 "load_count": len(network.loads) if hasattr(network, 'loads') else 0,
+                 "line_count": len(network.lines) if hasattr(network, 'lines') else 0,
+                 "snapshot_count": len(network.snapshots) if hasattr(network, 'snapshots') else 0
+             }
+
+             # Solver info
+             statistics["solver_info"] = {
+                 "solver_name": solver_name,
+                 "termination_condition": "optimal" if hasattr(network, 'objective') else "unknown",
+                 "objective_value": float(network.objective) if hasattr(network, 'objective') else None
+             }
+
+             return statistics
+
+         except Exception as e:
+             logger.error(f"Failed to calculate comprehensive network statistics: {e}", exc_info=True)
+             return {
+                 "error": str(e),
+                 "core_summary": {},
+                 "pypsa_statistics": {},
+                 "custom_statistics": {},
+                 "runtime_info": {"solve_time_seconds": solve_time},
+                 "solver_info": {"solver_name": solver_name}
+             }
+
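The MW-to-MWh conversion above is an element-wise multiply of the dispatch matrix by the per-snapshot weighting column. A small numpy sketch with assumed numbers:

    import numpy as np

    p = np.array([[100.0, 50.0],
                  [80.0, 60.0]])     # MW per (snapshot, generator)
    weights = np.array([3.0, 3.0])   # hours per snapshot (3-hourly resolution)

    energy_mwh = (p * weights[:, None]).sum()  # (100+50+80+60) * 3 = 870.0 MWh
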
+     def _calculate_statistics_by_year(self, network: 'pypsa.Network', solve_time: float, solver_name: str) -> Dict[int, Dict[str, Any]]:
+         """Calculate statistics for each year in the network"""
+         try:
+             # Extract years from network snapshots or manually extracted years
+             if hasattr(network.snapshots, 'year'):
+                 years = sorted(network.snapshots.year.unique())
+             elif hasattr(network, '_available_years'):
+                 years = network._available_years
+             elif hasattr(network.snapshots, 'levels'):
+                 # Multi-period optimization - get years from period level
+                 period_values = network.snapshots.get_level_values(0)
+                 years = sorted(period_values.unique())
+             else:
+                 # If no year info, skip year-based calculations
+                 logger.info("No year information found in network - skipping year-based statistics")
+                 return {}
+
+             logger.info(f"Calculating year-based statistics for years: {years}")
+             year_statistics = {}
+
+             for year in years:
+                 try:
+                     year_stats = self._calculate_network_statistics_for_year(network, year, solve_time, solver_name)
+                     year_statistics[year] = year_stats
+                     logger.info(f"Calculated statistics for year {year}")
+                 except Exception as e:
+                     logger.error(f"Failed to calculate statistics for year {year}: {e}")
+                     continue
+
+             logger.info(f"Successfully calculated year-based statistics for {len(year_statistics)} years")
+             return year_statistics
+
+         except Exception as e:
+             logger.error(f"Failed to calculate year-based statistics: {e}", exc_info=True)
+             return {}
+
+     def _calculate_network_statistics_for_year(self, network: 'pypsa.Network', year: int, solve_time: float, solver_name: str) -> Dict[str, Any]:
+         """Calculate network statistics for a specific year"""
+         try:
+             # Initialize statistics structure
+             statistics = {
+                 "core_summary": {},
+                 "custom_statistics": {},
+                 "runtime_info": {},
+                 "solver_info": {}
+             }
+
+             # Core summary statistics for this year
+             total_generation = 0
+             total_demand = 0
+             unserved_energy = 0
+
+             # Calculate generation statistics for this year
+             if hasattr(network, 'generators_t') and hasattr(network.generators_t, 'p'):
+                 # Filter by year
+                 year_generation = self._filter_timeseries_by_year(network.generators_t.p, network.snapshots, year)
+                 if year_generation is not None and not year_generation.empty:
+                     # Apply snapshot weightings for this year
+                     year_weightings = self._get_year_weightings(network, year)
+                     if year_weightings is not None:
+                         total_generation = float((year_generation.values * year_weightings[:, None]).sum())
+                     else:
+                         total_generation = float(year_generation.sum().sum())
+
+                     # Calculate unserved energy for this year
+                     if hasattr(network, '_component_type_map'):
+                         unmet_load_gen_names = [name for name, comp_type in network._component_type_map.items()
+                                                 if comp_type == 'UNMET_LOAD']
+
+                         for gen_name in unmet_load_gen_names:
+                             if gen_name in year_generation.columns:
+                                 if year_weightings is not None:
+                                     gen_output = float((year_generation[gen_name] * year_weightings).sum())
+                                 else:
+                                     gen_output = float(year_generation[gen_name].sum())
+                                 unserved_energy += gen_output
+
+             # Calculate demand statistics for this year
+             if hasattr(network, 'loads_t') and hasattr(network.loads_t, 'p'):
+                 year_demand = self._filter_timeseries_by_year(network.loads_t.p, network.snapshots, year)
+                 if year_demand is not None and not year_demand.empty:
+                     year_weightings = self._get_year_weightings(network, year)
+                     if year_weightings is not None:
+                         total_demand = float((year_demand.values * year_weightings[:, None]).sum())
+                     else:
+                         total_demand = float(year_demand.sum().sum())
+
+             statistics["core_summary"] = {
+                 "total_generation_mwh": total_generation,
+                 "total_demand_mwh": total_demand,
+                 "total_cost": None,  # Year-specific cost calculation would be complex
+                 "load_factor": (total_demand / (total_generation + 1e-6)) if total_generation > 0 else 0,
+                 "unserved_energy_mwh": unserved_energy
+             }
+
+             # Custom statistics
+             unmet_load_percentage = (unserved_energy / (total_demand + 1e-6)) * 100 if total_demand > 0 else 0
+
+             # Calculate year-specific carrier statistics
+             year_carrier_stats = self._calculate_year_carrier_statistics(network, year)
+
+             statistics["custom_statistics"] = {
+                 "unmet_load_percentage": unmet_load_percentage,
+                 "year": year,
+                 **year_carrier_stats  # Include all carrier-specific statistics for this year
+             }
+
+             # Runtime info
+             year_snapshot_count = self._count_year_snapshots(network.snapshots, year)
+
+             statistics["runtime_info"] = {
+                 "solve_time_seconds": solve_time,
+                 "year": year,
+                 "snapshot_count": year_snapshot_count
+             }
+
+             # Solver info
+             statistics["solver_info"] = {
+                 "solver_name": solver_name,
+                 "year": year
+             }
+
+             return statistics
+
+         except Exception as e:
+             logger.error(f"Failed to calculate network statistics for year {year}: {e}", exc_info=True)
+             return {
+                 "error": str(e),
+                 "core_summary": {},
+                 "custom_statistics": {"year": year},
+                 "runtime_info": {"solve_time_seconds": solve_time, "year": year},
+                 "solver_info": {"solver_name": solver_name, "year": year}
+             }
+
+     def _filter_timeseries_by_year(self, timeseries_df: 'pd.DataFrame', snapshots: 'pd.Index', year: int) -> Optional['pd.DataFrame']:
+         """Filter timeseries data by year"""
+         try:
+             # Handle MultiIndex case (multi-period optimization)
+             if hasattr(snapshots, 'levels'):
+                 period_values = snapshots.get_level_values(0)
+                 year_mask = period_values == year
+                 if year_mask.any():
+                     year_snapshots = snapshots[year_mask]
+                     return timeseries_df.loc[year_snapshots]
+
+             # Handle DatetimeIndex case (regular time series)
+             elif hasattr(snapshots, 'year'):
+                 year_mask = snapshots.year == year
+                 if year_mask.any():
+                     return timeseries_df.loc[year_mask]
+
+             # Fallback - return None if can't filter
+             return None
+
+         except Exception as e:
+             logger.error(f"Failed to filter timeseries by year {year}: {e}")
+             return None
+
+     def _get_year_weightings(self, network: 'pypsa.Network', year: int) -> Optional['np.ndarray']:
+         """Get snapshot weightings for a specific year"""
+         try:
+             # Filter snapshot weightings by year
+             if hasattr(network.snapshots, 'levels'):
+                 period_values = network.snapshots.get_level_values(0)
+                 year_mask = period_values == year
+                 if year_mask.any():
+                     year_snapshots = network.snapshots[year_mask]
+                     year_weightings = network.snapshot_weightings.loc[year_snapshots]
+                     if isinstance(year_weightings, pd.DataFrame):
+                         if 'objective' in year_weightings.columns:
+                             return year_weightings['objective'].values
+                         else:
+                             return year_weightings.iloc[:, 0].values
+                     else:
+                         return year_weightings.values
+
+             elif hasattr(network.snapshots, 'year'):
+                 year_mask = network.snapshots.year == year
+                 if year_mask.any():
+                     year_weightings = network.snapshot_weightings.loc[year_mask]
+                     if isinstance(year_weightings, pd.DataFrame):
+                         if 'objective' in year_weightings.columns:
+                             return year_weightings['objective'].values
+                         else:
+                             return year_weightings.iloc[:, 0].values
+                     else:
+                         return year_weightings.values
+
+             return None
+
+         except Exception as e:
+             logger.error(f"Failed to get year weightings for year {year}: {e}")
+             return None
+
+     def _count_year_snapshots(self, snapshots: 'pd.Index', year: int) -> int:
+         """Count snapshots for a specific year"""
+         try:
+             # Handle MultiIndex case
+             if hasattr(snapshots, 'levels'):
+                 period_values = snapshots.get_level_values(0)
+                 year_mask = period_values == year
+                 return year_mask.sum()
+
+             # Handle DatetimeIndex case
+             elif hasattr(snapshots, 'year'):
+                 year_mask = snapshots.year == year
+                 return year_mask.sum()
+
+             # Fallback
+             return 0
+
+         except Exception as e:
+             logger.error(f"Failed to count snapshots for year {year}: {e}")
+             return 0
+
+     def _calculate_year_carrier_statistics(self, network: 'pypsa.Network', year: int) -> Dict[str, Any]:
+         """Calculate carrier-specific statistics for a specific year"""
+         # Note: This is a simplified implementation that doesn't have database access
+         # The proper implementation should be done in the storage module where we have conn and network_id
+         # For now, return empty dictionaries - the storage module will handle this properly
+         return {
+             "dispatch_by_carrier": {},
+             "capacity_by_carrier": {},
+             "emissions_by_carrier": {},
+             "capital_cost_by_carrier": {},
+             "operational_cost_by_carrier": {},
+             "total_system_cost_by_carrier": {}
+         }
+
+     def _get_generator_carrier_name(self, generator_name: str) -> Optional[str]:
+         """Get carrier name for a generator - simplified implementation"""
+         # This is a simplified approach - in practice, this should query the database
+         # or use the component type mapping from the network
+
+         # Try to extract carrier from generator name patterns
+         gen_lower = generator_name.lower()
+
+         if 'coal' in gen_lower:
+             return 'coal'
+         elif 'gas' in gen_lower or 'ccgt' in gen_lower or 'ocgt' in gen_lower:
+             return 'gas'
+         elif 'nuclear' in gen_lower:
+             return 'nuclear'
+         elif 'solar' in gen_lower or 'pv' in gen_lower:
+             return 'solar'
+         elif 'wind' in gen_lower:
+             return 'wind'
+         elif 'hydro' in gen_lower:
+             return 'hydro'
+         elif 'biomass' in gen_lower:
+             return 'biomass'
+         elif 'battery' in gen_lower:
+             return 'battery'
+         elif 'unmet' in gen_lower:
+             return 'Unmet Load'
+         else:
+             # Default to generator name if no pattern matches
+             return generator_name
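
Taken together, a minimal end-to-end use of this module might look like the sketch below. It assumes PyPSA and the HiGHS solver are installed; the one-bus toy network is illustrative and not part of the package:

    import pandas as pd
    import pypsa

    network = pypsa.Network()
    network.set_snapshots(pd.date_range('2030-01-01', periods=24, freq='h'))
    network.add('Bus', 'bus1')
    network.add('Load', 'load1', bus='bus1', p_set=100.0)
    network.add('Generator', 'gen1', bus='bus1', p_nom=200.0, marginal_cost=50.0)

    solver = NetworkSolver()
    result = solver.solve_network(network, solver_name='highs')
    print(result['success'], result['status'], result['objective_value'])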