pyconvexity 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff compares publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.

This version of pyconvexity might be problematic.

Files changed (43)
  1. pyconvexity/__init__.py +57 -8
  2. pyconvexity/_version.py +1 -2
  3. pyconvexity/core/__init__.py +0 -2
  4. pyconvexity/core/database.py +158 -0
  5. pyconvexity/core/types.py +105 -18
  6. pyconvexity/data/README.md +101 -0
  7. pyconvexity/data/__init__.py +18 -0
  8. pyconvexity/data/__pycache__/__init__.cpython-313.pyc +0 -0
  9. pyconvexity/data/loaders/__init__.py +3 -0
  10. pyconvexity/data/loaders/__pycache__/__init__.cpython-313.pyc +0 -0
  11. pyconvexity/data/loaders/__pycache__/cache.cpython-313.pyc +0 -0
  12. pyconvexity/data/loaders/cache.py +212 -0
  13. pyconvexity/data/schema/01_core_schema.sql +12 -12
  14. pyconvexity/data/schema/02_data_metadata.sql +17 -321
  15. pyconvexity/data/sources/__init__.py +5 -0
  16. pyconvexity/data/sources/__pycache__/__init__.cpython-313.pyc +0 -0
  17. pyconvexity/data/sources/__pycache__/gem.cpython-313.pyc +0 -0
  18. pyconvexity/data/sources/gem.py +412 -0
  19. pyconvexity/io/__init__.py +32 -0
  20. pyconvexity/io/excel_exporter.py +1012 -0
  21. pyconvexity/io/excel_importer.py +1109 -0
  22. pyconvexity/io/netcdf_exporter.py +192 -0
  23. pyconvexity/io/netcdf_importer.py +1602 -0
  24. pyconvexity/models/__init__.py +7 -0
  25. pyconvexity/models/attributes.py +209 -72
  26. pyconvexity/models/components.py +3 -0
  27. pyconvexity/models/network.py +17 -15
  28. pyconvexity/models/scenarios.py +177 -0
  29. pyconvexity/solvers/__init__.py +29 -0
  30. pyconvexity/solvers/pypsa/__init__.py +24 -0
  31. pyconvexity/solvers/pypsa/api.py +421 -0
  32. pyconvexity/solvers/pypsa/batch_loader.py +304 -0
  33. pyconvexity/solvers/pypsa/builder.py +566 -0
  34. pyconvexity/solvers/pypsa/constraints.py +321 -0
  35. pyconvexity/solvers/pypsa/solver.py +1106 -0
  36. pyconvexity/solvers/pypsa/storage.py +1574 -0
  37. pyconvexity/timeseries.py +327 -0
  38. pyconvexity/validation/rules.py +2 -2
  39. {pyconvexity-0.1.2.dist-info → pyconvexity-0.1.4.dist-info}/METADATA +5 -2
  40. pyconvexity-0.1.4.dist-info/RECORD +46 -0
  41. pyconvexity-0.1.2.dist-info/RECORD +0 -20
  42. {pyconvexity-0.1.2.dist-info → pyconvexity-0.1.4.dist-info}/WHEEL +0 -0
  43. {pyconvexity-0.1.2.dist-info → pyconvexity-0.1.4.dist-info}/top_level.txt +0 -0
pyconvexity/solvers/pypsa/solver.py
@@ -0,0 +1,1106 @@
1
+ """
2
+ Solving functionality for PyPSA networks.
3
+
4
+ Simplified to always use multi-period optimization for consistency.
5
+ """
6
+
7
+ import logging
8
+ import time
9
+ import uuid
10
+ import pandas as pd
11
+ import numpy as np
12
+ from typing import Dict, Any, Optional
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
+ class NetworkSolver:
18
+ """
19
+ Simplified PyPSA network solver that always uses multi-period optimization.
20
+
21
+ This ensures consistent behavior for both single-year and multi-year models.
22
+ """
23
+
24
+ def __init__(self):
25
+ # Import PyPSA with error handling
26
+ try:
27
+ import pypsa
28
+ self.pypsa = pypsa
29
+ except ImportError as e:
30
+ raise ImportError(
31
+ "PyPSA is not installed or could not be imported. "
32
+ "Please ensure it is installed correctly in the environment."
33
+ ) from e
34
+
35
+ def _get_user_settings_path(self):
36
+ """Get the path to the user settings file (same location as Tauri uses)"""
37
+ try:
38
+ import platform
39
+ import os
40
+ from pathlib import Path
41
+
42
+ system = platform.system()
43
+ if system == "Darwin": # macOS
44
+ home = Path.home()
45
+ app_data_dir = home / "Library" / "Application Support" / "com.convexity.desktop"
46
+ elif system == "Windows":
47
+ app_data_dir = Path(os.environ.get("APPDATA", "")) / "com.convexity.desktop"
48
+ else: # Linux
49
+ home = Path.home()
50
+ app_data_dir = home / ".local" / "share" / "com.convexity.desktop"
51
+
52
+ settings_file = app_data_dir / "user_settings.json"
53
+ return settings_file if settings_file.exists() else None
54
+
55
+ except Exception as e:
56
+ logger.warning(f"Failed to determine user settings path: {e}")
57
+ return None
58
+
59
+ def _resolve_default_solver(self) -> str:
60
+ """Resolve 'default' solver to user's preferred solver"""
61
+ try:
62
+ import json
63
+
64
+ settings_path = self._get_user_settings_path()
65
+ if not settings_path:
66
+ logger.debug("User settings file not found, using 'highs' as default solver")
67
+ return 'highs'
68
+
69
+ with open(settings_path, 'r') as f:
70
+ user_settings = json.load(f)
71
+
72
+ # Get default solver from user settings
73
+ default_solver = user_settings.get('default_solver', 'highs')
74
+ logger.info(f"📖 Read default solver from user settings: {default_solver}")
75
+
76
+ # Validate that it's a known solver
77
+ known_solvers = ['highs', 'gurobi', 'gurobi (barrier)', 'gurobi (barrier homogeneous)',
78
+ 'gurobi (barrier+crossover balanced)', 'gurobi (dual simplex)', 'cplex', 'glpk', 'cbc', 'scip']
79
+
80
+ if default_solver in known_solvers:
81
+ return default_solver
82
+ else:
83
+ logger.warning(f"Unknown default solver '{default_solver}' in user settings, falling back to 'highs'")
84
+ return 'highs'
85
+
86
+ except Exception as e:
87
+ logger.warning(f"Failed to read default solver from user settings: {e}")
88
+ return 'highs'
89
+
90
+ def solve_network(
91
+ self,
92
+ network: 'pypsa.Network',
93
+ solver_name: str = "highs",
94
+ solver_options: Optional[Dict[str, Any]] = None,
95
+ discount_rate: Optional[float] = None,
96
+ job_id: Optional[str] = None,
97
+ conn=None,
98
+ network_id: Optional[int] = None,
99
+ scenario_id: Optional[int] = None,
100
+ constraint_applicator=None
101
+ ) -> Dict[str, Any]:
102
+ """
103
+ Solve PyPSA network and return results.
104
+
105
+ Args:
106
+ network: PyPSA Network object to solve
107
+ solver_name: Solver to use (default: "highs")
108
+ solver_options: Optional solver-specific options
109
+ discount_rate: Optional discount rate for multi-period optimization
110
+ job_id: Optional job ID for tracking
111
+
112
+ Returns:
113
+ Dictionary with solve results and metadata
114
+
115
+ Raises:
116
+ ImportError: If PyPSA is not available
117
+ Exception: If solving fails
118
+ """
119
+ start_time = time.time()
120
+ run_id = str(uuid.uuid4())
121
+
122
+ logger.info(f"Starting network solve with {solver_name}")
123
+
124
+ try:
125
+ # Get solver configuration
126
+ actual_solver_name, solver_config = self._get_solver_config(solver_name, solver_options)
127
+
128
+
129
+ years = list(network.investment_periods)
130
+ effective_discount_rate = discount_rate if discount_rate is not None else 0.05 # Default 5%
131
+
132
+ logger.info(f"Multi-period optimization with {len(years)} periods: {years}")
133
+ logger.info(f"Discount rate: {effective_discount_rate}")
134
+
135
+ # Calculate investment period weightings with discount rate
136
+ self._calculate_investment_weightings(network, effective_discount_rate)
137
+
138
+ # Set snapshot weightings after multi-period setup
139
+ if conn and network_id:
140
+ self._set_snapshot_weightings_after_multiperiod(conn, network_id, network)
141
+
142
+ # Prepare optimization constraints (extra_functionality)
143
+ extra_functionality = None
144
+ if conn and network_id and constraint_applicator:
145
+ optimization_constraints = constraint_applicator.get_optimization_constraints(conn, network_id, scenario_id)
146
+ if optimization_constraints:
147
+ logger.info(f"Applying {len(optimization_constraints)} optimization-time constraints")
148
+ extra_functionality = self._create_extra_functionality(optimization_constraints, constraint_applicator)
149
+
150
+ # Solver diagnostics
151
+ logger.info(f"=== PYPSA SOLVER DIAGNOSTICS ===")
152
+ logger.info(f"Solver: {actual_solver_name}")
153
+ logger.info(f"Investment periods: {years}")
154
+ logger.info(f"Snapshots: {len(network.snapshots)} (MultiIndex)")
155
+ if solver_config:
156
+ logger.info(f"Solver options: {solver_config}")
157
+ logger.info(f"=== END PYPSA SOLVER DIAGNOSTICS ===")
158
+
159
+ # Always solve with multi-period optimization
160
+ logger.info(f"Solving network with multi-period optimization using {actual_solver_name}")
161
+
162
+ # DEBUG: Check network structure before solving
163
+ logger.info(f"DEBUG: Network snapshots type: {type(network.snapshots)}")
164
+ logger.info(f"DEBUG: Network snapshots names: {getattr(network.snapshots, 'names', 'No names')}")
165
+ logger.info(f"DEBUG: Network snapshots shape: {len(network.snapshots)}")
166
+ logger.info(f"DEBUG: First 3 snapshots: {network.snapshots[:3].tolist()}")
167
+
168
+ # Check some timeseries data structure
169
+ if hasattr(network, 'generators_t') and hasattr(network.generators_t, 'p_max_pu'):
170
+ if not network.generators_t.p_max_pu.empty:
171
+ logger.info(f"DEBUG: generators_t.p_max_pu type: {type(network.generators_t.p_max_pu)}")
172
+ logger.info(f"DEBUG: generators_t.p_max_pu index type: {type(network.generators_t.p_max_pu.index)}")
173
+ logger.info(f"DEBUG: generators_t.p_max_pu index names: {getattr(network.generators_t.p_max_pu.index, 'names', 'No names')}")
174
+ logger.info(f"DEBUG: generators_t.p_max_pu shape: {network.generators_t.p_max_pu.shape}")
175
+ logger.info(f"DEBUG: First 3 p_max_pu index values: {network.generators_t.p_max_pu.index[:3].tolist()}")
176
+
177
+ if hasattr(network, 'loads_t') and hasattr(network.loads_t, 'p_set'):
178
+ if not network.loads_t.p_set.empty:
179
+ logger.info(f"DEBUG: loads_t.p_set type: {type(network.loads_t.p_set)}")
180
+ logger.info(f"DEBUG: loads_t.p_set index type: {type(network.loads_t.p_set.index)}")
181
+ logger.info(f"DEBUG: loads_t.p_set index names: {getattr(network.loads_t.p_set.index, 'names', 'No names')}")
182
+ logger.info(f"DEBUG: loads_t.p_set shape: {network.loads_t.p_set.shape}")
183
+ logger.info(f"DEBUG: First 3 p_set index values: {network.loads_t.p_set.index[:3].tolist()}")
184
+
185
+ if solver_config:
186
+ result = network.optimize(solver_name=actual_solver_name, multi_investment_periods=True,
187
+ extra_functionality=extra_functionality, **solver_config)
188
+ else:
189
+ result = network.optimize(solver_name=actual_solver_name, multi_investment_periods=True,
190
+ extra_functionality=extra_functionality)
191
+
192
+ solve_time = time.time() - start_time
193
+
194
+ # Post-solve debug logging (matches old code)
195
+ objective_value = getattr(network, 'objective', None)
196
+ if objective_value is not None:
197
+ logger.info(f"[DEBUG] POST-SOLVE snapshot_weightings structure:")
198
+ if hasattr(network, 'snapshot_weightings'):
199
+ logger.info(f"[DEBUG] Type: {type(network.snapshot_weightings)}")
200
+ logger.info(f"[DEBUG] Columns: {list(network.snapshot_weightings.columns)}")
201
+ logger.info(f"[DEBUG] Shape: {network.snapshot_weightings.shape}")
202
+ logger.info(f"[DEBUG] Unique values in objective column: {network.snapshot_weightings['objective'].unique()}")
203
+ logger.info(f"[DEBUG] Sum of objective column: {network.snapshot_weightings['objective'].sum()}")
204
+
205
+ if hasattr(network, 'investment_period_weightings'):
206
+ logger.info(f"[DEBUG] investment_period_weightings exists:")
207
+ logger.info(f"[DEBUG] Type: {type(network.investment_period_weightings)}")
208
+ logger.info(f"[DEBUG] Content:\n{network.investment_period_weightings}")
209
+
210
+ # Extract solve results with comprehensive statistics
211
+ solve_result = self._extract_solve_results(network, result, solve_time, actual_solver_name, run_id)
212
+
213
+ # Calculate comprehensive network statistics (all years combined)
214
+ if solve_result.get('success'):
215
+ logger.info("Calculating comprehensive network statistics...")
216
+ network_statistics = self._calculate_comprehensive_network_statistics(network, solve_time, actual_solver_name)
217
+ solve_result['network_statistics'] = network_statistics
218
+
219
+ # Calculate year-based statistics for capacity expansion analysis
220
+ logger.info("Calculating year-based statistics...")
221
+ year_statistics = self._calculate_statistics_by_year(network, solve_time, actual_solver_name)
222
+ solve_result['year_statistics'] = year_statistics
223
+ solve_result['year_statistics_available'] = len(year_statistics) > 0
224
+
225
+ logger.info(f"Solve completed in {solve_time:.2f} seconds with status: {solve_result['status']}")
226
+ logger.info(f"PyPSA result object: {result}")
227
+ logger.info(f"PyPSA result status: {getattr(result, 'status', 'no status attr')}")
228
+ logger.info(f"Network objective: {getattr(network, 'objective', 'no objective')}")
229
+ logger.info(f"Solve result success: {solve_result.get('success')}")
230
+
231
+ return solve_result
232
+
233
+ except Exception as e:
234
+ solve_time = time.time() - start_time
235
+ logger.error(f"Solve failed after {solve_time:.2f} seconds: {e}")
236
+ logger.exception("Full solve error traceback:")
237
+
238
+ return {
239
+ "success": False,
240
+ "status": "failed",
241
+ "error": str(e),
242
+ "solve_time": solve_time,
243
+ "solver_name": actual_solver_name if 'actual_solver_name' in locals() else solver_name,
244
+ "run_id": run_id,
245
+ "objective_value": None
246
+ }
247
+
248
+ def _get_solver_config(self, solver_name: str, solver_options: Optional[Dict[str, Any]] = None) -> tuple[str, Optional[Dict[str, Any]]]:
249
+ """
250
+ Get the actual solver name and options for special solver configurations.
251
+
252
+ Args:
253
+ solver_name: The solver name (e.g., 'gurobi (barrier)', 'highs')
254
+ solver_options: Optional additional solver options
255
+
256
+ Returns:
257
+ Tuple of (actual_solver_name, solver_options_dict)
258
+ """
259
+ # Handle "default" solver
260
+ if solver_name == 'default':
261
+ # Try to read user's default solver preference
262
+ actual_solver = self._resolve_default_solver()
263
+ logger.info(f"Resolved 'default' solver to: {actual_solver}")
264
+ return actual_solver, solver_options
265
+
266
+ # Handle special Gurobi configurations
267
+ if solver_name == 'gurobi (barrier)':
268
+ gurobi_barrier_options = {
269
+ 'solver_options': {
270
+ 'Method': 2, # Barrier
271
+ 'Crossover': 0, # Skip crossover
272
+ 'MIPGap': 0.05, # 5% gap
273
+                 'Threads': 4,              # Limit the solve to 4 threads
274
+ 'Presolve': 2, # Aggressive presolve
275
+ 'ConcurrentMIP': 1, # Parallel root strategies
276
+ 'BarConvTol': 1e-4, # Relaxed barrier convergence
277
+ 'FeasibilityTol': 1e-5,
278
+ 'OptimalityTol': 1e-5,
279
+ 'NumericFocus': 1, # Improve stability
280
+ 'PreSparsify': 1,
281
+ }
282
+ }
283
+ # Merge with any additional options
284
+ if solver_options:
285
+ gurobi_barrier_options.update(solver_options)
286
+ return 'gurobi', gurobi_barrier_options
287
+
288
+ elif solver_name == 'gurobi (barrier homogeneous)':
289
+ gurobi_barrier_homogeneous_options = {
290
+ 'solver_options': {
291
+ 'Method': 2, # Barrier
292
+ 'Crossover': 0, # Skip crossover
293
+ 'MIPGap': 0.05,
294
+ 'Threads': 4,
295
+ 'Presolve': 2,
296
+ 'ConcurrentMIP': 1,
297
+ 'BarConvTol': 1e-4,
298
+ 'FeasibilityTol': 1e-5,
299
+ 'OptimalityTol': 1e-5,
300
+ 'NumericFocus': 1,
301
+ 'PreSparsify': 1,
302
+ 'BarHomogeneous': 1, # Enable homogeneous barrier algorithm
303
+ }
304
+ }
305
+ if solver_options:
306
+ gurobi_barrier_homogeneous_options.update(solver_options)
307
+ return 'gurobi', gurobi_barrier_homogeneous_options
308
+
309
+ elif solver_name == 'gurobi (barrier+crossover balanced)':
310
+ gurobi_options_balanced = {
311
+ 'solver_options': {
312
+ 'Method': 2,
313
+ 'Crossover': 1, # Dual crossover
314
+ 'MIPGap': 0.01,
315
+ 'Threads': 4,
316
+ 'Presolve': 2,
317
+ 'Heuristics': 0.1,
318
+ 'Cuts': 2,
319
+ 'ConcurrentMIP': 1,
320
+ 'BarConvTol': 1e-6,
321
+ 'FeasibilityTol': 1e-6,
322
+ 'OptimalityTol': 1e-6,
323
+ 'NumericFocus': 1,
324
+ 'PreSparsify': 1,
325
+ }
326
+ }
327
+ if solver_options:
328
+ gurobi_options_balanced.update(solver_options)
329
+ logger.info(f"Using Gurobi Barrier+Dual Crossover Balanced configuration")
330
+ return 'gurobi', gurobi_options_balanced
331
+
332
+ elif solver_name == 'gurobi (dual simplex)':
333
+ gurobi_dual_options = {
334
+ 'solver_options': {
335
+ 'Method': 1, # Dual simplex method
336
+ 'Threads': 0, # Use all available cores
337
+ 'Presolve': 2, # Aggressive presolve
338
+ }
339
+ }
340
+ if solver_options:
341
+ gurobi_dual_options.update(solver_options)
342
+ return 'gurobi', gurobi_dual_options
343
+
344
+ # Check if this is a known valid solver name
345
+ elif solver_name in ['highs', 'gurobi', 'cplex', 'glpk', 'cbc', 'scip']:
346
+ return solver_name, solver_options
347
+
348
+ else:
349
+ # Unknown solver name - log warning and fall back to highs
350
+ logger.warning(f"Unknown solver name '{solver_name}' - falling back to 'highs'")
351
+ return 'highs', solver_options
352
+
353
+
354
+ def _create_extra_functionality(self, optimization_constraints: list, constraint_applicator) -> callable:
355
+ """
356
+ Create extra_functionality function for optimization-time constraints.
357
+
358
+ This matches the old PyPSA solver's approach to applying constraints during optimization.
359
+
360
+ Args:
361
+ optimization_constraints: List of optimization constraint dictionaries
362
+ constraint_applicator: ConstraintApplicator instance
363
+
364
+ Returns:
365
+ Function that can be passed to network.optimize(extra_functionality=...)
366
+ """
367
+ def extra_functionality(network, snapshots):
368
+ """Apply optimization constraints during solve - matches old code structure"""
369
+ try:
370
+ logger.info(f"Applying {len(optimization_constraints)} optimization constraints during solve")
371
+
372
+ # Apply each constraint in priority order
373
+ sorted_constraints = sorted(optimization_constraints, key=lambda x: x.get('priority', 0))
374
+
375
+ for constraint in sorted_constraints:
376
+ try:
377
+ constraint_applicator.apply_optimization_constraint(network, snapshots, constraint)
378
+ except Exception as e:
379
+ logger.error(f"Failed to apply optimization constraint {constraint.get('name', 'unknown')}: {e}")
380
+ continue
381
+
382
+ logger.info("Optimization constraints applied successfully")
383
+
384
+ except Exception as e:
385
+ logger.error(f"Failed to apply optimization constraints: {e}")
386
+ # Don't re-raise - let optimization continue
387
+
388
+ return extra_functionality
389
+
390
+ def _set_snapshot_weightings_after_multiperiod(self, conn, network_id: int, network: 'pypsa.Network'):
391
+ """Set snapshot weightings AFTER multi-period setup - matches old code approach."""
392
+ try:
393
+ from pyconvexity.models import get_network_time_periods, get_network_info
394
+
395
+ time_periods = get_network_time_periods(conn, network_id)
396
+ if time_periods and len(network.snapshots) > 0:
397
+ logger.info(f"Setting snapshot weightings AFTER multi-period setup for {len(time_periods)} time periods")
398
+
399
+ # Get network info to determine time interval (stored in networks table, not network_config)
400
+ network_info = get_network_info(conn, network_id)
401
+ time_interval = network_info.get('time_interval', '1H')
402
+ weight = self._parse_time_interval(time_interval)
403
+
404
+ if weight is None:
405
+ weight = 1.0
406
+ logger.warning(f"Could not parse time interval '{time_interval}', using default weight of 1.0")
407
+
408
+ logger.info(f"Parsed time interval '{time_interval}' -> weight = {weight}")
409
+
410
+ # Create weightings array - all snapshots get the same weight for this time resolution
411
+ weightings = [weight] * len(time_periods)
412
+
413
+ if len(weightings) == len(network.snapshots):
414
+ # Set all three columns like the old code - critical for proper objective calculation
415
+ network.snapshot_weightings.loc[:, 'objective'] = weightings
416
+ network.snapshot_weightings.loc[:, 'generators'] = weightings
417
+ network.snapshot_weightings.loc[:, 'stores'] = weightings
418
+ logger.info(f"Set snapshot weightings AFTER multi-period setup: objective, generators, stores columns")
419
+
420
+ # Debug logging like old code
421
+ logger.info(f"Snapshot weightings shape: {network.snapshot_weightings.shape}")
422
+ logger.info(f"Unique values in objective column: {network.snapshot_weightings['objective'].unique()}")
423
+ logger.info(f"Sum of objective column: {network.snapshot_weightings['objective'].sum()}")
424
+ logger.info(f"Weight per snapshot: {weight} hours")
425
+ else:
426
+ logger.warning(f"Mismatch between weightings ({len(weightings)}) and snapshots ({len(network.snapshots)})")
427
+ except Exception as e:
428
+ logger.warning(f"Failed to set snapshot weightings after multi-period setup: {e}")
429
+ logger.exception("Full traceback:")
430
+
431
+ def _parse_time_interval(self, time_interval: str) -> Optional[float]:
432
+ """Parse time interval string to hours - handles multiple formats."""
433
+ if not time_interval:
434
+ return None
435
+
436
+ try:
437
+ # Clean up the string
438
+ interval = time_interval.strip()
439
+
440
+ # Handle ISO 8601 duration format (PT3H, PT30M, etc.)
441
+ if interval.startswith('PT') and interval.endswith('H'):
442
+ # Extract hours (e.g., 'PT3H' -> 3.0)
443
+ hours_str = interval[2:-1] # Remove 'PT' and 'H'
444
+ return float(hours_str)
445
+ elif interval.startswith('PT') and interval.endswith('M'):
446
+ # Extract minutes (e.g., 'PT30M' -> 0.5)
447
+ minutes_str = interval[2:-1] # Remove 'PT' and 'M'
448
+ return float(minutes_str) / 60.0
449
+ elif interval.startswith('PT') and interval.endswith('S'):
450
+ # Extract seconds (e.g., 'PT3600S' -> 1.0)
451
+ seconds_str = interval[2:-1] # Remove 'PT' and 'S'
452
+ return float(seconds_str) / 3600.0
453
+
454
+ # Handle simple frequency strings (3H, 2D, etc.)
455
+ elif interval.endswith('H') or interval.endswith('h'):
456
+ hours_str = interval[:-1]
457
+ return float(hours_str) if hours_str else 1.0
458
+ elif interval.endswith('D') or interval.endswith('d'):
459
+ days_str = interval[:-1]
460
+ return float(days_str) * 24 if days_str else 24.0
461
+ elif interval.endswith('M') or interval.endswith('m'):
462
+ minutes_str = interval[:-1]
463
+ return float(minutes_str) / 60.0 if minutes_str else 1.0/60.0
464
+ elif interval.endswith('S') or interval.endswith('s'):
465
+ seconds_str = interval[:-1]
466
+ return float(seconds_str) / 3600.0 if seconds_str else 1.0/3600.0
467
+
468
+ # Try to parse as plain number (assume hours)
469
+ else:
470
+ return float(interval)
471
+
472
+ except (ValueError, TypeError) as e:
473
+ logger.warning(f"Could not parse time interval '{time_interval}': {e}")
474
+ return None
475
+
476
+ def _calculate_investment_weightings(self, network: 'pypsa.Network', discount_rate: float) -> None:
477
+ """
478
+ Calculate investment period weightings using discount rate - matching old PyPSA solver exactly.
479
+
480
+ Args:
481
+ network: PyPSA Network object
482
+ discount_rate: Discount rate for NPV calculations
483
+ """
484
+ try:
485
+ import pandas as pd
486
+
487
+ if not hasattr(network, 'investment_periods') or len(network.investment_periods) == 0:
488
+ return
489
+
490
+ years = network.investment_periods
491
+ # Convert pandas Index to list for easier handling
492
+ years_list = years.tolist() if hasattr(years, 'tolist') else list(years)
493
+
494
+ logger.info(f"Calculating investment weightings for periods: {years_list} with discount rate: {discount_rate}")
495
+
496
+ # For single year, use simple weighting of 1.0
497
+ if len(years_list) == 1:
498
+ # Single year case
499
+ network.investment_period_weightings = pd.DataFrame({
500
+ 'objective': pd.Series({years_list[0]: 1.0}),
501
+ 'years': pd.Series({years_list[0]: 1})
502
+ })
503
+ logger.info(f"Set single-year investment period weightings for year {years_list[0]}")
504
+ else:
505
+ # Multi-year case - EXACTLY match old code logic
506
+ # Get unique years from the network snapshots to determine period lengths
507
+ if hasattr(network.snapshots, 'year'):
508
+ snapshot_years = sorted(network.snapshots.year.unique())
509
+ elif hasattr(network.snapshots, 'get_level_values'):
510
+ # MultiIndex case - get years from 'period' level
511
+ snapshot_years = sorted(network.snapshots.get_level_values('period').unique())
512
+ else:
513
+ # Fallback: use investment periods as years
514
+ snapshot_years = years_list
515
+
516
+ logger.info(f"Snapshot years found: {snapshot_years}")
517
+
518
+ # Calculate years per period - EXACTLY matching old code
519
+ years_diff = []
520
+ for i, year in enumerate(years_list):
521
+ if i < len(years_list) - 1:
522
+ # Years between this period and the next
523
+ next_year = years_list[i + 1]
524
+ period_years = next_year - year
525
+ else:
526
+ # For the last period, calculate based on snapshot coverage
527
+ if snapshot_years:
528
+ # Find the last snapshot year that's >= current period year
529
+ last_snapshot_year = max([y for y in snapshot_years if y >= year])
530
+ period_years = last_snapshot_year - year + 1
531
+ else:
532
+ # Fallback: assume same length as previous period or 1
533
+ if len(years_diff) > 0:
534
+ period_years = years_diff[-1] # Same as previous period
535
+ else:
536
+ period_years = 1
537
+
538
+ years_diff.append(period_years)
539
+ logger.info(f"Period {year}: {period_years} years")
540
+
541
+ # Create weightings DataFrame with years column
542
+ weightings_df = pd.DataFrame({
543
+ 'years': pd.Series(years_diff, index=years_list)
544
+ })
545
+
546
+ # Calculate objective weightings with discount rate - EXACTLY matching old code
547
+ r = discount_rate
548
+ T = 0 # Cumulative time tracker
549
+
550
+ logger.info(f"Calculating discount factors with rate {r}:")
551
+ for period, nyears in weightings_df.years.items():
552
+ # Calculate discount factors for each year in this period
553
+ discounts = [(1 / (1 + r) ** t) for t in range(T, T + nyears)]
554
+ period_weighting = sum(discounts)
555
+ weightings_df.at[period, "objective"] = period_weighting
556
+
557
+ logger.info(f" Period {period}: years {T} to {T + nyears - 1}, discounts={[f'{d:.4f}' for d in discounts]}, sum={period_weighting:.4f}")
558
+ T += nyears # Update cumulative time
559
+
560
+ network.investment_period_weightings = weightings_df
561
+ logger.info(f"Final investment period weightings:")
562
+ logger.info(f" Years: {weightings_df['years'].to_dict()}")
563
+ logger.info(f" Objective: {weightings_df['objective'].to_dict()}")
564
+
565
+ except Exception as e:
566
+ logger.error(f"Failed to calculate investment weightings: {e}")
567
+ logger.exception("Full traceback:")
568
+
569
+
570
+ def _extract_solve_results(self, network: 'pypsa.Network', result: Any, solve_time: float, solver_name: str, run_id: str) -> Dict[str, Any]:
571
+ """
572
+ Extract solve results from PyPSA network.
573
+
574
+ Args:
575
+ network: Solved PyPSA Network object
576
+ result: PyPSA solve result
577
+ solve_time: Time taken to solve
578
+ solver_name: Name of solver used
579
+ run_id: Unique run identifier
580
+
581
+ Returns:
582
+ Dictionary with solve results and metadata
583
+ """
584
+ try:
585
+ # Extract basic solve information
586
+ status = getattr(result, 'status', 'unknown')
587
+ objective_value = getattr(network, 'objective', None)
588
+
589
+ # Debug logging
590
+ logger.info(f"Raw PyPSA result attributes: {dir(result) if result else 'None'}")
591
+ if hasattr(result, 'termination_condition'):
592
+ logger.info(f"Termination condition: {result.termination_condition}")
593
+ if hasattr(result, 'solver'):
594
+ logger.info(f"Solver info: {result.solver}")
595
+
596
+ # Convert PyPSA result to dictionary format
597
+ result_dict = self._convert_pypsa_result_to_dict(result)
598
+
599
+ # Determine success based on multiple criteria
600
+ success = self._determine_solve_success(result, network, status, objective_value)
601
+
602
+ solve_result = {
603
+ "success": success,
604
+ "status": status,
605
+ "solve_time": solve_time,
606
+ "solver_name": solver_name,
607
+ "run_id": run_id,
608
+ "objective_value": objective_value,
609
+ "pypsa_result": result_dict,
610
+ "network_name": network.name,
611
+ "num_buses": len(network.buses),
612
+ "num_generators": len(network.generators),
613
+ "num_loads": len(network.loads),
614
+ "num_lines": len(network.lines),
615
+ "num_links": len(network.links),
616
+ "num_snapshots": len(network.snapshots)
617
+ }
618
+
619
+ # Add multi-period information if available
620
+ if hasattr(network, '_available_years') and network._available_years:
621
+ solve_result["years"] = network._available_years
622
+ solve_result["multi_period"] = len(network._available_years) > 1
623
+
624
+ return solve_result
625
+
626
+ except Exception as e:
627
+ logger.error(f"Failed to extract solve results: {e}")
628
+ return {
629
+ "success": False,
630
+ "status": "extraction_failed",
631
+ "error": f"Failed to extract results: {e}",
632
+ "solve_time": solve_time,
633
+ "solver_name": solver_name,
634
+ "run_id": run_id,
635
+ "objective_value": None
636
+ }
637
+
638
+ def _determine_solve_success(self, result: Any, network: 'pypsa.Network', status: str, objective_value: Optional[float]) -> bool:
639
+ """
640
+ Determine if solve was successful based on multiple criteria.
641
+
642
+ PyPSA sometimes returns status='unknown' even for successful solves,
643
+ so we need to check multiple indicators.
644
+ """
645
+ try:
646
+ # Check explicit status first
647
+ if status in ['optimal', 'feasible']:
648
+ logger.info(f"Success determined by status: {status}")
649
+ return True
650
+
651
+ # Check termination condition
652
+ if hasattr(result, 'termination_condition'):
653
+ term_condition = str(result.termination_condition).lower()
654
+ if 'optimal' in term_condition:
655
+ logger.info(f"Success determined by termination condition: {result.termination_condition}")
656
+ return True
657
+
658
+ # Check if we have a valid objective value
659
+ if objective_value is not None and not (objective_value == 0 and status == 'unknown'):
660
+ logger.info(f"Success determined by valid objective value: {objective_value}")
661
+ return True
662
+
663
+ # Check solver-specific success indicators
664
+ if hasattr(result, 'solver'):
665
+ solver_info = result.solver
666
+ if hasattr(solver_info, 'termination_condition'):
667
+ term_condition = str(solver_info.termination_condition).lower()
668
+ if 'optimal' in term_condition:
669
+ logger.info(f"Success determined by solver termination condition: {solver_info.termination_condition}")
670
+ return True
671
+
672
+ logger.warning(f"Could not determine success: status={status}, objective={objective_value}, result_attrs={dir(result) if result else 'None'}")
673
+ return False
674
+
675
+ except Exception as e:
676
+ logger.error(f"Error determining solve success: {e}")
677
+ return False
678
+
679
+ def _convert_pypsa_result_to_dict(self, result) -> Dict[str, Any]:
680
+ """
681
+ Convert PyPSA result object to dictionary.
682
+
683
+ Args:
684
+ result: PyPSA solve result object
685
+
686
+ Returns:
687
+ Dictionary representation of the result
688
+ """
689
+ try:
690
+ if result is None:
691
+ return {"status": "no_result"}
692
+
693
+ result_dict = {}
694
+
695
+ # Extract common attributes
696
+ for attr in ['status', 'success', 'termination_condition', 'solver']:
697
+ if hasattr(result, attr):
698
+ value = getattr(result, attr)
699
+ # Convert to serializable format
700
+ if hasattr(value, '__dict__'):
701
+ result_dict[attr] = str(value)
702
+ else:
703
+ result_dict[attr] = value
704
+
705
+ # Handle solver-specific information
706
+ if hasattr(result, 'solver_results'):
707
+ solver_results = getattr(result, 'solver_results')
708
+ if hasattr(solver_results, '__dict__'):
709
+ result_dict['solver_results'] = str(solver_results)
710
+ else:
711
+ result_dict['solver_results'] = solver_results
712
+
713
+ return result_dict
714
+
715
+ except Exception as e:
716
+ logger.warning(f"Failed to convert PyPSA result to dict: {e}")
717
+ return {"status": "conversion_failed", "error": str(e)}
718
+
719
+ def _calculate_comprehensive_network_statistics(self, network: 'pypsa.Network', solve_time: float, solver_name: str) -> Dict[str, Any]:
720
+ """Calculate comprehensive network statistics including PyPSA statistics and custom metrics"""
721
+ try:
722
+ # Initialize statistics structure
723
+ statistics = {
724
+ "core_summary": {},
725
+ "pypsa_statistics": {},
726
+ "custom_statistics": {},
727
+ "runtime_info": {},
728
+ "solver_info": {}
729
+ }
730
+
731
+ # Core summary statistics
732
+ total_generation = 0
733
+ total_demand = 0
734
+ unserved_energy = 0
735
+
736
+ # Calculate generation statistics
737
+ if hasattr(network, 'generators_t') and hasattr(network.generators_t, 'p'):
738
+ # Apply snapshot weightings to convert MW to MWh
739
+ weightings = network.snapshot_weightings
740
+ if isinstance(weightings, pd.DataFrame):
741
+ if 'objective' in weightings.columns:
742
+ weighting_values = weightings['objective'].values
743
+ else:
744
+ weighting_values = weightings.iloc[:, 0].values
745
+ else:
746
+ weighting_values = weightings.values
747
+
748
+ total_generation = float((network.generators_t.p.values * weighting_values[:, None]).sum())
749
+
750
+ # Calculate unserved energy from UNMET_LOAD generators
751
+ if hasattr(network, 'generators') and hasattr(network, '_component_type_map'):
752
+ unmet_load_gen_names = [name for name, comp_type in network._component_type_map.items()
753
+ if comp_type == 'UNMET_LOAD']
754
+
755
+ for gen_name in unmet_load_gen_names:
756
+ if gen_name in network.generators_t.p.columns:
757
+ gen_output = float((network.generators_t.p[gen_name] * weighting_values).sum())
758
+ unserved_energy += gen_output
759
+
760
+ # Calculate demand statistics
761
+ if hasattr(network, 'loads_t') and hasattr(network.loads_t, 'p'):
762
+ weightings = network.snapshot_weightings
763
+ if isinstance(weightings, pd.DataFrame):
764
+ if 'objective' in weightings.columns:
765
+ weighting_values = weightings['objective'].values
766
+ else:
767
+ weighting_values = weightings.iloc[:, 0].values
768
+ else:
769
+ weighting_values = weightings.values
770
+
771
+ total_demand = float((network.loads_t.p.values * weighting_values[:, None]).sum())
772
+
773
+ statistics["core_summary"] = {
774
+ "total_generation_mwh": total_generation,
775
+ "total_demand_mwh": total_demand,
776
+ "total_cost": float(network.objective) if hasattr(network, 'objective') else None,
777
+ "load_factor": (total_demand / (total_generation + 1e-6)) if total_generation > 0 else 0,
778
+ "unserved_energy_mwh": unserved_energy
779
+ }
780
+
781
+ # Calculate PyPSA statistics
782
+ try:
783
+ pypsa_stats = network.statistics()
784
+ if pypsa_stats is not None and not pypsa_stats.empty:
785
+ statistics["pypsa_statistics"] = self._convert_pypsa_result_to_dict(pypsa_stats)
786
+ else:
787
+ statistics["pypsa_statistics"] = {}
788
+ except Exception as e:
789
+ logger.error(f"Failed to calculate PyPSA statistics: {e}")
790
+ statistics["pypsa_statistics"] = {}
791
+
792
+ # Custom statistics - calculate detailed breakdowns
793
+ total_cost = float(network.objective) if hasattr(network, 'objective') else 0.0
794
+ avg_price = (total_cost / (total_generation + 1e-6)) if total_generation > 0 else None
795
+ unmet_load_percentage = (unserved_energy / (total_demand + 1e-6)) * 100 if total_demand > 0 else 0
796
+
797
+             # Note: for solver statistics we keep a simplified approach here, since this is only used for logging
798
+ # The storage module will calculate proper totals from carrier statistics
799
+ statistics["custom_statistics"] = {
800
+ "total_capital_cost": 0.0, # Will be calculated properly in storage module
801
+ "total_operational_cost": total_cost, # PyPSA objective (includes both capital and operational, discounted)
802
+ "total_currency_cost": total_cost,
803
+ "total_emissions_tons_co2": 0.0, # Will be calculated properly in storage module
804
+ "average_price_per_mwh": avg_price,
805
+ "unmet_load_percentage": unmet_load_percentage,
806
+ "max_unmet_load_hour_mw": 0.0 # TODO: Calculate max hourly unmet load
807
+ }
808
+
809
+ # Runtime info
810
+ unmet_load_count = 0
811
+ if hasattr(network, '_component_type_map'):
812
+ unmet_load_count = len([name for name, comp_type in network._component_type_map.items()
813
+ if comp_type == 'UNMET_LOAD'])
814
+
815
+ statistics["runtime_info"] = {
816
+ "solve_time_seconds": solve_time,
817
+ "component_count": (
818
+ len(network.buses) + len(network.generators) + len(network.loads) +
819
+ len(network.lines) + len(network.links)
820
+ ) if hasattr(network, 'buses') else 0,
821
+ "bus_count": len(network.buses) if hasattr(network, 'buses') else 0,
822
+ "generator_count": len(network.generators) if hasattr(network, 'generators') else 0,
823
+ "unmet_load_count": unmet_load_count,
824
+ "load_count": len(network.loads) if hasattr(network, 'loads') else 0,
825
+ "line_count": len(network.lines) if hasattr(network, 'lines') else 0,
826
+ "snapshot_count": len(network.snapshots) if hasattr(network, 'snapshots') else 0
827
+ }
828
+
829
+ # Solver info
830
+ statistics["solver_info"] = {
831
+ "solver_name": solver_name,
832
+ "termination_condition": "optimal" if hasattr(network, 'objective') else "unknown",
833
+ "objective_value": float(network.objective) if hasattr(network, 'objective') else None
834
+ }
835
+
836
+ return statistics
837
+
838
+ except Exception as e:
839
+ logger.error(f"Failed to calculate comprehensive network statistics: {e}", exc_info=True)
840
+ return {
841
+ "error": str(e),
842
+ "core_summary": {},
843
+ "pypsa_statistics": {},
844
+ "custom_statistics": {},
845
+ "runtime_info": {"solve_time_seconds": solve_time},
846
+ "solver_info": {"solver_name": solver_name}
847
+ }
848
+
849
+ def _calculate_statistics_by_year(self, network: 'pypsa.Network', solve_time: float, solver_name: str) -> Dict[int, Dict[str, Any]]:
850
+ """Calculate statistics for each year in the network"""
851
+ try:
852
+ # Extract years from network snapshots or manually extracted years
853
+ if hasattr(network.snapshots, 'year'):
854
+ years = sorted(network.snapshots.year.unique())
855
+ elif hasattr(network, '_available_years'):
856
+ years = network._available_years
857
+ elif hasattr(network.snapshots, 'levels'):
858
+ # Multi-period optimization - get years from period level
859
+ period_values = network.snapshots.get_level_values(0)
860
+ years = sorted(period_values.unique())
861
+ else:
862
+ # If no year info, skip year-based calculations
863
+ logger.info("No year information found in network - skipping year-based statistics")
864
+ return {}
865
+
866
+ logger.info(f"Calculating year-based statistics for years: {years}")
867
+ year_statistics = {}
868
+
869
+ for year in years:
870
+ try:
871
+ year_stats = self._calculate_network_statistics_for_year(network, year, solve_time, solver_name)
872
+ year_statistics[year] = year_stats
873
+ logger.info(f"Calculated statistics for year {year}")
874
+ except Exception as e:
875
+ logger.error(f"Failed to calculate statistics for year {year}: {e}")
876
+ continue
877
+
878
+ logger.info(f"Successfully calculated year-based statistics for {len(year_statistics)} years")
879
+ return year_statistics
880
+
881
+ except Exception as e:
882
+ logger.error(f"Failed to calculate year-based statistics: {e}", exc_info=True)
883
+ return {}
884
+
885
+ def _calculate_network_statistics_for_year(self, network: 'pypsa.Network', year: int, solve_time: float, solver_name: str) -> Dict[str, Any]:
886
+ """Calculate network statistics for a specific year"""
887
+ try:
888
+ # Initialize statistics structure
889
+ statistics = {
890
+ "core_summary": {},
891
+ "custom_statistics": {},
892
+ "runtime_info": {},
893
+ "solver_info": {}
894
+ }
895
+
896
+ # Core summary statistics for this year
897
+ total_generation = 0
898
+ total_demand = 0
899
+ unserved_energy = 0
900
+
901
+ # Calculate generation statistics for this year
902
+ if hasattr(network, 'generators_t') and hasattr(network.generators_t, 'p'):
903
+ # Filter by year
904
+ year_generation = self._filter_timeseries_by_year(network.generators_t.p, network.snapshots, year)
905
+ if year_generation is not None and not year_generation.empty:
906
+ # Apply snapshot weightings for this year
907
+ year_weightings = self._get_year_weightings(network, year)
908
+ if year_weightings is not None:
909
+ total_generation = float((year_generation.values * year_weightings[:, None]).sum())
910
+ else:
911
+ total_generation = float(year_generation.sum().sum())
912
+
913
+ # Calculate unserved energy for this year
914
+ if hasattr(network, '_component_type_map'):
915
+ unmet_load_gen_names = [name for name, comp_type in network._component_type_map.items()
916
+ if comp_type == 'UNMET_LOAD']
917
+
918
+ for gen_name in unmet_load_gen_names:
919
+ if gen_name in year_generation.columns:
920
+ if year_weightings is not None:
921
+ gen_output = float((year_generation[gen_name] * year_weightings).sum())
922
+ else:
923
+ gen_output = float(year_generation[gen_name].sum())
924
+ unserved_energy += gen_output
925
+
926
+ # Calculate demand statistics for this year
927
+ if hasattr(network, 'loads_t') and hasattr(network.loads_t, 'p'):
928
+ year_demand = self._filter_timeseries_by_year(network.loads_t.p, network.snapshots, year)
929
+ if year_demand is not None and not year_demand.empty:
930
+ year_weightings = self._get_year_weightings(network, year)
931
+ if year_weightings is not None:
932
+ total_demand = float((year_demand.values * year_weightings[:, None]).sum())
933
+ else:
934
+ total_demand = float(year_demand.sum().sum())
935
+
936
+ statistics["core_summary"] = {
937
+ "total_generation_mwh": total_generation,
938
+ "total_demand_mwh": total_demand,
939
+ "total_cost": None, # Year-specific cost calculation would be complex
940
+ "load_factor": (total_demand / (total_generation + 1e-6)) if total_generation > 0 else 0,
941
+ "unserved_energy_mwh": unserved_energy
942
+ }
943
+
944
+ # Custom statistics
945
+ unmet_load_percentage = (unserved_energy / (total_demand + 1e-6)) * 100 if total_demand > 0 else 0
946
+
947
+ # Calculate year-specific carrier statistics
948
+ year_carrier_stats = self._calculate_year_carrier_statistics(network, year)
949
+
950
+ statistics["custom_statistics"] = {
951
+ "unmet_load_percentage": unmet_load_percentage,
952
+ "year": year,
953
+ **year_carrier_stats # Include all carrier-specific statistics for this year
954
+ }
955
+
956
+ # Runtime info
957
+ year_snapshot_count = self._count_year_snapshots(network.snapshots, year)
958
+
959
+ statistics["runtime_info"] = {
960
+ "solve_time_seconds": solve_time,
961
+ "year": year,
962
+ "snapshot_count": year_snapshot_count
963
+ }
964
+
965
+ # Solver info
966
+ statistics["solver_info"] = {
967
+ "solver_name": solver_name,
968
+ "year": year
969
+ }
970
+
971
+ return statistics
972
+
973
+ except Exception as e:
974
+ logger.error(f"Failed to calculate network statistics for year {year}: {e}", exc_info=True)
975
+ return {
976
+ "error": str(e),
977
+ "core_summary": {},
978
+ "custom_statistics": {"year": year},
979
+ "runtime_info": {"solve_time_seconds": solve_time, "year": year},
980
+ "solver_info": {"solver_name": solver_name, "year": year}
981
+ }
982
+
983
+ def _filter_timeseries_by_year(self, timeseries_df: 'pd.DataFrame', snapshots: 'pd.Index', year: int) -> 'pd.DataFrame':
984
+ """Filter timeseries data by year"""
985
+ try:
986
+ # Handle MultiIndex case (multi-period optimization)
987
+ if hasattr(snapshots, 'levels'):
988
+ period_values = snapshots.get_level_values(0)
989
+ year_mask = period_values == year
990
+ if year_mask.any():
991
+ year_snapshots = snapshots[year_mask]
992
+ return timeseries_df.loc[year_snapshots]
993
+
994
+ # Handle DatetimeIndex case (regular time series)
995
+ elif hasattr(snapshots, 'year'):
996
+ year_mask = snapshots.year == year
997
+ if year_mask.any():
998
+ return timeseries_df.loc[year_mask]
999
+
1000
+ # Fallback - return None if can't filter
1001
+ return None
1002
+
1003
+ except Exception as e:
1004
+ logger.error(f"Failed to filter timeseries by year {year}: {e}")
1005
+ return None
1006
+
1007
+ def _get_year_weightings(self, network: 'pypsa.Network', year: int) -> 'np.ndarray':
1008
+ """Get snapshot weightings for a specific year"""
1009
+ try:
1010
+ # Filter snapshot weightings by year
1011
+ if hasattr(network.snapshots, 'levels'):
1012
+ period_values = network.snapshots.get_level_values(0)
1013
+ year_mask = period_values == year
1014
+ if year_mask.any():
1015
+ year_snapshots = network.snapshots[year_mask]
1016
+ year_weightings = network.snapshot_weightings.loc[year_snapshots]
1017
+ if isinstance(year_weightings, pd.DataFrame):
1018
+ if 'objective' in year_weightings.columns:
1019
+ return year_weightings['objective'].values
1020
+ else:
1021
+ return year_weightings.iloc[:, 0].values
1022
+ else:
1023
+ return year_weightings.values
1024
+
1025
+ elif hasattr(network.snapshots, 'year'):
1026
+ year_mask = network.snapshots.year == year
1027
+ if year_mask.any():
1028
+ year_weightings = network.snapshot_weightings.loc[year_mask]
1029
+ if isinstance(year_weightings, pd.DataFrame):
1030
+ if 'objective' in year_weightings.columns:
1031
+ return year_weightings['objective'].values
1032
+ else:
1033
+ return year_weightings.iloc[:, 0].values
1034
+ else:
1035
+ return year_weightings.values
1036
+
1037
+ return None
1038
+
1039
+ except Exception as e:
1040
+ logger.error(f"Failed to get year weightings for year {year}: {e}")
1041
+ return None
1042
+
1043
+ def _count_year_snapshots(self, snapshots: 'pd.Index', year: int) -> int:
1044
+ """Count snapshots for a specific year"""
1045
+ try:
1046
+ # Handle MultiIndex case
1047
+ if hasattr(snapshots, 'levels'):
1048
+ period_values = snapshots.get_level_values(0)
1049
+ year_mask = period_values == year
1050
+ return year_mask.sum()
1051
+
1052
+ # Handle DatetimeIndex case
1053
+ elif hasattr(snapshots, 'year'):
1054
+ year_mask = snapshots.year == year
1055
+ return year_mask.sum()
1056
+
1057
+ # Fallback
1058
+ return 0
1059
+
1060
+ except Exception as e:
1061
+ logger.error(f"Failed to count snapshots for year {year}: {e}")
1062
+ return 0
1063
+
1064
+ def _calculate_year_carrier_statistics(self, network: 'pypsa.Network', year: int) -> Dict[str, Any]:
1065
+ """Calculate carrier-specific statistics for a specific year"""
1066
+ # Note: This is a simplified implementation that doesn't have database access
1067
+ # The proper implementation should be done in the storage module where we have conn and network_id
1068
+ # For now, return empty dictionaries - the storage module will handle this properly
1069
+ return {
1070
+ "dispatch_by_carrier": {},
1071
+ "capacity_by_carrier": {},
1072
+ "emissions_by_carrier": {},
1073
+ "capital_cost_by_carrier": {},
1074
+ "operational_cost_by_carrier": {},
1075
+ "total_system_cost_by_carrier": {}
1076
+ }
1077
+
1078
+ def _get_generator_carrier_name(self, generator_name: str) -> Optional[str]:
1079
+ """Get carrier name for a generator - simplified implementation"""
1080
+ # This is a simplified approach - in practice, this should query the database
1081
+ # or use the component type mapping from the network
1082
+
1083
+ # Try to extract carrier from generator name patterns
1084
+ gen_lower = generator_name.lower()
1085
+
1086
+ if 'coal' in gen_lower:
1087
+ return 'coal'
1088
+ elif 'gas' in gen_lower or 'ccgt' in gen_lower or 'ocgt' in gen_lower:
1089
+ return 'gas'
1090
+ elif 'nuclear' in gen_lower:
1091
+ return 'nuclear'
1092
+ elif 'solar' in gen_lower or 'pv' in gen_lower:
1093
+ return 'solar'
1094
+ elif 'wind' in gen_lower:
1095
+ return 'wind'
1096
+ elif 'hydro' in gen_lower:
1097
+ return 'hydro'
1098
+ elif 'biomass' in gen_lower:
1099
+ return 'biomass'
1100
+ elif 'battery' in gen_lower:
1101
+ return 'battery'
1102
+ elif 'unmet' in gen_lower:
1103
+ return 'Unmet Load'
1104
+ else:
1105
+ # Default to generator name if no pattern matches
1106
+ return generator_name
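
For orientation, the sketch below shows how the NetworkSolver class added in this release might be driven from user code. The import path follows the file layout shown in the diff; the network construction is elided and only arguments documented in the diff are used, so treat this as an illustration rather than the package's documented API.

import pypsa

from pyconvexity.solvers.pypsa.solver import NetworkSolver

# NetworkSolver always solves with multi_investment_periods=True, so the
# network is expected to have investment_periods defined before solving.
network = pypsa.Network()
# ... populate buses, generators, loads, snapshots and investment periods ...

solver = NetworkSolver()
result = solver.solve_network(
    network,
    solver_name="highs",   # or "default" to pick up the user-settings preference
    discount_rate=0.05,    # optional; 0.05 is also the fallback used in the diff
)

if result["success"]:
    print(f"Objective {result['objective_value']:.2f}, "
          f"solved in {result['solve_time']:.1f}s with {result['solver_name']}")
else:
    print(f"Solve failed: {result.get('error')}")

The investment-period weighting computed in _calculate_investment_weightings is the sum of per-year discount factors over the years each period covers. A small standalone check of that arithmetic, with made-up periods and a 5% discount rate:

def period_weightings(periods, last_snapshot_year, r=0.05):
    """Reproduce the weighting rule from the diff: each period is weighted by
    sum(1 / (1 + r) ** t) over the consecutive model years it spans."""
    weights, T = {}, 0
    for i, year in enumerate(periods):
        if i < len(periods) - 1:
            nyears = periods[i + 1] - year
        else:
            nyears = last_snapshot_year - year + 1
        weights[year] = sum(1 / (1 + r) ** t for t in range(T, T + nyears))
        T += nyears
    return weights

print(period_weightings([2025, 2030], last_snapshot_year=2034))
# roughly {2025: 4.546, 2030: 3.562} -> earlier periods carry more objective weight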