pyconvexity 0.3.8.post7__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. pyconvexity/__init__.py +87 -46
  2. pyconvexity/_version.py +1 -1
  3. pyconvexity/core/__init__.py +3 -5
  4. pyconvexity/core/database.py +111 -103
  5. pyconvexity/core/errors.py +16 -10
  6. pyconvexity/core/types.py +61 -54
  7. pyconvexity/data/__init__.py +0 -1
  8. pyconvexity/data/loaders/cache.py +65 -64
  9. pyconvexity/data/schema/01_core_schema.sql +134 -234
  10. pyconvexity/data/schema/02_data_metadata.sql +38 -168
  11. pyconvexity/data/schema/03_validation_data.sql +327 -264
  12. pyconvexity/data/sources/gem.py +169 -139
  13. pyconvexity/io/__init__.py +4 -10
  14. pyconvexity/io/excel_exporter.py +694 -480
  15. pyconvexity/io/excel_importer.py +817 -545
  16. pyconvexity/io/netcdf_exporter.py +66 -61
  17. pyconvexity/io/netcdf_importer.py +850 -619
  18. pyconvexity/models/__init__.py +109 -59
  19. pyconvexity/models/attributes.py +197 -178
  20. pyconvexity/models/carriers.py +70 -67
  21. pyconvexity/models/components.py +260 -236
  22. pyconvexity/models/network.py +202 -284
  23. pyconvexity/models/results.py +65 -55
  24. pyconvexity/models/scenarios.py +58 -88
  25. pyconvexity/solvers/__init__.py +5 -5
  26. pyconvexity/solvers/pypsa/__init__.py +3 -3
  27. pyconvexity/solvers/pypsa/api.py +150 -134
  28. pyconvexity/solvers/pypsa/batch_loader.py +165 -162
  29. pyconvexity/solvers/pypsa/builder.py +390 -291
  30. pyconvexity/solvers/pypsa/constraints.py +184 -162
  31. pyconvexity/solvers/pypsa/solver.py +968 -666
  32. pyconvexity/solvers/pypsa/storage.py +1377 -671
  33. pyconvexity/timeseries.py +63 -60
  34. pyconvexity/validation/__init__.py +14 -6
  35. pyconvexity/validation/rules.py +95 -84
  36. pyconvexity-0.4.1.dist-info/METADATA +46 -0
  37. pyconvexity-0.4.1.dist-info/RECORD +42 -0
  38. pyconvexity/data/__pycache__/__init__.cpython-313.pyc +0 -0
  39. pyconvexity/data/loaders/__pycache__/__init__.cpython-313.pyc +0 -0
  40. pyconvexity/data/loaders/__pycache__/cache.cpython-313.pyc +0 -0
  41. pyconvexity/data/schema/04_scenario_schema.sql +0 -122
  42. pyconvexity/data/schema/migrate_add_geometries.sql +0 -73
  43. pyconvexity/data/sources/__pycache__/__init__.cpython-313.pyc +0 -0
  44. pyconvexity/data/sources/__pycache__/gem.cpython-313.pyc +0 -0
  45. pyconvexity-0.3.8.post7.dist-info/METADATA +0 -138
  46. pyconvexity-0.3.8.post7.dist-info/RECORD +0 -49
  47. {pyconvexity-0.3.8.post7.dist-info → pyconvexity-0.4.1.dist-info}/WHEEL +0 -0
  48. {pyconvexity-0.3.8.post7.dist-info → pyconvexity-0.4.1.dist-info}/top_level.txt +0 -0
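The largest functional change visible in the `pyconvexity/solvers/pypsa/solver.py` diff below is that `NetworkSolver.solve_network` drops the `network_id` argument (the database connection is now treated as holding a single network, as the updated snapshot-weighting docstring notes) and documents a `solver_name="custom"` mode driven by `custom_solver_config`. The sketch below is illustrative only: the import path is assumed from the file list and the argument values are taken from the new docstring example, not from packaged code.

```python
# Illustrative sketch only; import path assumed from the file list
# (pyconvexity/solvers/pypsa/solver.py), network construction elided.
import pypsa

from pyconvexity.solvers.pypsa.solver import NetworkSolver  # assumed path

network = pypsa.Network()  # in practice, built by pyconvexity from the database
solver = NetworkSolver()

# 0.3.8.post7: the target network had to be identified explicitly, e.g.
#   solver.solve_network(network, solver_name="highs", conn=conn, network_id=7)
# 0.4.1: network_id is gone; conn (if given) is assumed to hold a single network.
result = solver.solve_network(
    network,
    solver_name="custom",
    conn=None,  # without a connection, weighting/constraint lookups are skipped
    custom_solver_config={  # format documented in the new docstring
        "solver": "gurobi",
        "solver_options": {"Method": 2, "Crossover": 0},
    },
)
print(result["status"], result.get("objective_value"))
```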
@@ -17,96 +17,121 @@ logger = logging.getLogger(__name__)
 class NetworkSolver:
     """
     Simplified PyPSA network solver that always uses multi-period optimization.
-
+
     This ensures consistent behavior for both single-year and multi-year models.
     """
-
+
     def __init__(self):
         # Import PyPSA with error handling
         try:
             import pypsa
+
             self.pypsa = pypsa
         except ImportError as e:
             raise ImportError(
                 "PyPSA is not installed or could not be imported. "
                 "Please ensure it is installed correctly in the environment."
             ) from e
-
+
     def _get_user_settings_path(self):
         """Get the path to the user settings file (same location as Tauri uses)"""
         try:
             import platform
             import os
             from pathlib import Path
-
+
             system = platform.system()
             if system == "Darwin":  # macOS
                 home = Path.home()
-                app_data_dir = home / "Library" / "Application Support" / "com.convexity.desktop"
+                app_data_dir = (
+                    home / "Library" / "Application Support" / "com.convexity.desktop"
+                )
             elif system == "Windows":
-                app_data_dir = Path(os.environ.get("APPDATA", "")) / "com.convexity.desktop"
+                app_data_dir = (
+                    Path(os.environ.get("APPDATA", "")) / "com.convexity.desktop"
+                )
             else:  # Linux
                 home = Path.home()
                 app_data_dir = home / ".local" / "share" / "com.convexity.desktop"
-
+
             settings_file = app_data_dir / "user_settings.json"
             return settings_file if settings_file.exists() else None
-
+
         except Exception as e:
             logger.warning(f"Failed to determine user settings path: {e}")
             return None
-
+
     def _resolve_default_solver(self) -> str:
         """Resolve 'default' solver to user's preferred solver"""
         try:
             import json
-
+
             settings_path = self._get_user_settings_path()
             if not settings_path:
-                logger.debug("User settings file not found, using 'highs' as default solver")
-                return 'highs'
-
-            with open(settings_path, 'r') as f:
+                logger.debug(
+                    "User settings file not found, using 'highs' as default solver"
+                )
+                return "highs"
+
+            with open(settings_path, "r") as f:
                 user_settings = json.load(f)
-
+
             # Get default solver from user settings
-            default_solver = user_settings.get('default_solver', 'highs')
+            default_solver = user_settings.get("default_solver", "highs")
             logger.info(f"📖 Read default solver from user settings: {default_solver}")
-
+
             # Validate that it's a known solver
-            known_solvers = ['highs', 'gurobi', 'gurobi (barrier)', 'gurobi (barrier homogeneous)',
-                             'gurobi (barrier+crossover balanced)', 'gurobi (dual simplex)',
-                             'mosek', 'mosek (default)', 'mosek (barrier)', 'mosek (barrier+crossover)', 'mosek (dual simplex)',
-                             'copt', 'copt (barrier)', 'copt (barrier homogeneous)', 'copt (barrier+crossover)',
-                             'copt (dual simplex)', 'copt (concurrent)',
-                             'cplex', 'glpk', 'cbc', 'scip']
-
+            known_solvers = [
+                "highs",
+                "gurobi",
+                "gurobi (barrier)",
+                "gurobi (barrier homogeneous)",
+                "gurobi (barrier+crossover balanced)",
+                "gurobi (dual simplex)",
+                "mosek",
+                "mosek (default)",
+                "mosek (barrier)",
+                "mosek (barrier+crossover)",
+                "mosek (dual simplex)",
+                "copt",
+                "copt (barrier)",
+                "copt (barrier homogeneous)",
+                "copt (barrier+crossover)",
+                "copt (dual simplex)",
+                "copt (concurrent)",
+                "cplex",
+                "glpk",
+                "cbc",
+                "scip",
+            ]
+
             if default_solver in known_solvers:
                 return default_solver
             else:
-                logger.warning(f"Unknown default solver '{default_solver}' in user settings, falling back to 'highs'")
-                return 'highs'
-
+                logger.warning(
+                    f"Unknown default solver '{default_solver}' in user settings, falling back to 'highs'"
+                )
+                return "highs"
+
         except Exception as e:
             logger.warning(f"Failed to read default solver from user settings: {e}")
-            return 'highs'
-
+            return "highs"
+
     def solve_network(
         self,
-        network: 'pypsa.Network',
+        network: "pypsa.Network",
         solver_name: str = "highs",
         solver_options: Optional[Dict[str, Any]] = None,
         discount_rate: Optional[float] = None,
         job_id: Optional[str] = None,
         conn=None,
-        network_id: Optional[int] = None,
        scenario_id: Optional[int] = None,
        constraint_applicator=None,
-        custom_solver_config: Optional[Dict[str, Any]] = None
+        custom_solver_config: Optional[Dict[str, Any]] = None,
     ) -> Dict[str, Any]:
        """
        Solve PyPSA network and return results.
-
+
        Args:
            network: PyPSA Network object to solve
            solver_name: Solver to use (default: "highs"). Use "custom" for custom_solver_config.
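Hunk context: `_resolve_default_solver` above is what backs `solver_name="default"` (resolved in `_get_solver_config` later in this diff); it reads the desktop app's `user_settings.json` and falls back to `"highs"` when the file is missing or the value is not in `known_solvers`. A minimal sketch of a settings file it would accept follows; the chosen value is hypothetical, and only the `default_solver` key is consulted.

```python
# Hypothetical user_settings.json content accepted by _resolve_default_solver.
import json

user_settings = {"default_solver": "gurobi (barrier)"}  # any entry from known_solvers
print(json.dumps(user_settings, indent=2))
# On Linux the file is looked up at
# ~/.local/share/com.convexity.desktop/user_settings.json (per _get_user_settings_path).
```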
@@ -116,71 +141,91 @@ class NetworkSolver:
             custom_solver_config: Optional custom solver configuration when solver_name="custom"
                 Format: {"solver": "actual_solver_name", "solver_options": {...}}
                 Example: {"solver": "gurobi", "solver_options": {"Method": 2, "Crossover": 0}}
-
+
         Returns:
             Dictionary with solve results and metadata
-
+
         Raises:
             ImportError: If PyPSA is not available
             Exception: If solving fails
         """
         start_time = time.time()
         run_id = str(uuid.uuid4())
-
+
         logger.info(f"Starting network solve with {solver_name}")
-
+
         try:
             # Get solver configuration
-            actual_solver_name, solver_config = self._get_solver_config(solver_name, solver_options, custom_solver_config)
-
+            actual_solver_name, solver_config = self._get_solver_config(
+                solver_name, solver_options, custom_solver_config
+            )
+
             # Resolve discount rate - fallback to 0.0 if None
             # Note: API layer (api.py) handles fetching from network_config before calling this
-            effective_discount_rate = discount_rate if discount_rate is not None else 0.0
+            effective_discount_rate = (
+                discount_rate if discount_rate is not None else 0.0
+            )
             logger.info(f"Discount rate for solve: {effective_discount_rate}")
-
+
             years = list(network.investment_periods)
-
+
             logger.info(f"Multi-period optimization with {len(years)} periods: {years}")
-
+
             # Calculate investment period weightings with discount rate
             self._calculate_investment_weightings(network, effective_discount_rate)
-
+
             # Set snapshot weightings after multi-period setup
-            if conn and network_id:
-                self._set_snapshot_weightings_after_multiperiod(conn, network_id, network)
-
+            if conn:
+                self._set_snapshot_weightings_after_multiperiod(conn, network)
+
             # Prepare optimization constraints - ONLY model constraints
             # Network constraints were already applied before solve in api.py
             extra_functionality = None
             model_constraints = []
-
-            if conn and network_id and constraint_applicator:
-                optimization_constraints = constraint_applicator.get_optimization_constraints(conn, network_id, scenario_id)
+
+            if conn and constraint_applicator:
+                optimization_constraints = (
+                    constraint_applicator.get_optimization_constraints(
+                        conn, scenario_id
+                    )
+                )
                 if optimization_constraints:
-                    logger.info(f"Found {len(optimization_constraints)} optimization constraints")
-
+                    logger.info(
+                        f"Found {len(optimization_constraints)} optimization constraints"
+                    )
+
                     # Filter for model constraints only (network constraints already applied)
                     for constraint in optimization_constraints:
-                        constraint_code = constraint.get('constraint_code', '')
+                        constraint_code = constraint.get("constraint_code", "")
                         constraint_type = self._detect_constraint_type(constraint_code)
-                        constraint_name = constraint.get('name', 'unknown')
-
+                        constraint_name = constraint.get("name", "unknown")
+
                         if constraint_type == "model_constraint":
                             model_constraints.append(constraint)
-                            logger.info(f"Will apply model constraint during solve: {constraint_name}")
+                            logger.info(
+                                f"Will apply model constraint during solve: {constraint_name}"
+                            )
                         else:
-                            logger.info(f"Skipping network constraint (already applied): {constraint_name}")
-
-                    logger.info(f"Will apply {len(model_constraints)} model constraints during optimization")
-
+                            logger.info(
+                                f"Skipping network constraint (already applied): {constraint_name}"
+                            )
+
+                    logger.info(
+                        f"Will apply {len(model_constraints)} model constraints during optimization"
+                    )
+
             # Create extra_functionality for model constraints only
             if model_constraints:
-                extra_functionality = self._create_extra_functionality(model_constraints, constraint_applicator)
-                logger.info(f"Prepared {len(model_constraints)} model constraints for optimization-time application")
-
+                extra_functionality = self._create_extra_functionality(
+                    model_constraints, constraint_applicator
+                )
+                logger.info(
+                    f"Prepared {len(model_constraints)} model constraints for optimization-time application"
+                )
+
             # NOTE: Model constraints are applied DURING solve via extra_functionality
             # Network constraints were already applied to the network structure before solve
-
+
             # Solver diagnostics
             logger.info(f"=== PYPSA SOLVER DIAGNOSTICS ===")
             logger.info(f"Solver: {actual_solver_name}")
@@ -189,472 +234,560 @@ class NetworkSolver:
189
234
  if solver_config:
190
235
  logger.info(f"Solver options: {solver_config}")
191
236
  logger.info(f"=== END PYPSA SOLVER DIAGNOSTICS ===")
192
-
237
+
193
238
  # Always solve with multi-period optimization
194
- logger.info(f"Solving network with multi-period optimization using {actual_solver_name}")
195
-
239
+ logger.info(
240
+ f"Solving network with multi-period optimization using {actual_solver_name}"
241
+ )
242
+
196
243
  # DEBUG: Check network structure before solving
197
244
  logger.info(f"DEBUG: Network snapshots type: {type(network.snapshots)}")
198
- logger.info(f"DEBUG: Network snapshots names: {getattr(network.snapshots, 'names', 'No names')}")
245
+ logger.info(
246
+ f"DEBUG: Network snapshots names: {getattr(network.snapshots, 'names', 'No names')}"
247
+ )
199
248
  logger.info(f"DEBUG: Network snapshots shape: {len(network.snapshots)}")
200
249
  logger.info(f"DEBUG: First 3 snapshots: {network.snapshots[:3].tolist()}")
201
-
250
+
202
251
  # Check some timeseries data structure
203
- if hasattr(network, 'generators_t') and hasattr(network.generators_t, 'p_max_pu'):
252
+ if hasattr(network, "generators_t") and hasattr(
253
+ network.generators_t, "p_max_pu"
254
+ ):
204
255
  if not network.generators_t.p_max_pu.empty:
205
- logger.info(f"DEBUG: generators_t.p_max_pu type: {type(network.generators_t.p_max_pu)}")
206
- logger.info(f"DEBUG: generators_t.p_max_pu index type: {type(network.generators_t.p_max_pu.index)}")
207
- logger.info(f"DEBUG: generators_t.p_max_pu index names: {getattr(network.generators_t.p_max_pu.index, 'names', 'No names')}")
208
- logger.info(f"DEBUG: generators_t.p_max_pu shape: {network.generators_t.p_max_pu.shape}")
209
- logger.info(f"DEBUG: First 3 p_max_pu index values: {network.generators_t.p_max_pu.index[:3].tolist()}")
210
-
211
- if hasattr(network, 'loads_t') and hasattr(network.loads_t, 'p_set'):
256
+ logger.info(
257
+ f"DEBUG: generators_t.p_max_pu type: {type(network.generators_t.p_max_pu)}"
258
+ )
259
+ logger.info(
260
+ f"DEBUG: generators_t.p_max_pu index type: {type(network.generators_t.p_max_pu.index)}"
261
+ )
262
+ logger.info(
263
+ f"DEBUG: generators_t.p_max_pu index names: {getattr(network.generators_t.p_max_pu.index, 'names', 'No names')}"
264
+ )
265
+ logger.info(
266
+ f"DEBUG: generators_t.p_max_pu shape: {network.generators_t.p_max_pu.shape}"
267
+ )
268
+ logger.info(
269
+ f"DEBUG: First 3 p_max_pu index values: {network.generators_t.p_max_pu.index[:3].tolist()}"
270
+ )
271
+
272
+ if hasattr(network, "loads_t") and hasattr(network.loads_t, "p_set"):
212
273
  if not network.loads_t.p_set.empty:
213
- logger.info(f"DEBUG: loads_t.p_set type: {type(network.loads_t.p_set)}")
214
- logger.info(f"DEBUG: loads_t.p_set index type: {type(network.loads_t.p_set.index)}")
215
- logger.info(f"DEBUG: loads_t.p_set index names: {getattr(network.loads_t.p_set.index, 'names', 'No names')}")
216
- logger.info(f"DEBUG: loads_t.p_set shape: {network.loads_t.p_set.shape}")
217
- logger.info(f"DEBUG: First 3 p_set index values: {network.loads_t.p_set.index[:3].tolist()}")
218
-
274
+ logger.info(
275
+ f"DEBUG: loads_t.p_set type: {type(network.loads_t.p_set)}"
276
+ )
277
+ logger.info(
278
+ f"DEBUG: loads_t.p_set index type: {type(network.loads_t.p_set.index)}"
279
+ )
280
+ logger.info(
281
+ f"DEBUG: loads_t.p_set index names: {getattr(network.loads_t.p_set.index, 'names', 'No names')}"
282
+ )
283
+ logger.info(
284
+ f"DEBUG: loads_t.p_set shape: {network.loads_t.p_set.shape}"
285
+ )
286
+ logger.info(
287
+ f"DEBUG: First 3 p_set index values: {network.loads_t.p_set.index[:3].tolist()}"
288
+ )
289
+
219
290
  if solver_config:
220
- result = network.optimize(solver_name=actual_solver_name, multi_investment_periods=True,
221
- extra_functionality=extra_functionality, **solver_config)
291
+ result = network.optimize(
292
+ solver_name=actual_solver_name,
293
+ multi_investment_periods=True,
294
+ extra_functionality=extra_functionality,
295
+ **solver_config,
296
+ )
222
297
  else:
223
- result = network.optimize(solver_name=actual_solver_name, multi_investment_periods=True,
224
- extra_functionality=extra_functionality)
225
-
298
+ result = network.optimize(
299
+ solver_name=actual_solver_name,
300
+ multi_investment_periods=True,
301
+ extra_functionality=extra_functionality,
302
+ )
303
+
226
304
  solve_time = time.time() - start_time
227
-
305
+
228
306
  # Post-solve debug logging (matches old code)
229
- objective_value = getattr(network, 'objective', None)
307
+ objective_value = getattr(network, "objective", None)
230
308
  if objective_value is not None:
231
309
  logger.info(f"[DEBUG] POST-SOLVE snapshot_weightings structure:")
232
- if hasattr(network, 'snapshot_weightings'):
310
+ if hasattr(network, "snapshot_weightings"):
233
311
  logger.info(f"[DEBUG] Type: {type(network.snapshot_weightings)}")
234
- logger.info(f"[DEBUG] Columns: {list(network.snapshot_weightings.columns)}")
312
+ logger.info(
313
+ f"[DEBUG] Columns: {list(network.snapshot_weightings.columns)}"
314
+ )
235
315
  logger.info(f"[DEBUG] Shape: {network.snapshot_weightings.shape}")
236
- logger.info(f"[DEBUG] Unique values in objective column: {network.snapshot_weightings['objective'].unique()}")
237
- logger.info(f"[DEBUG] Sum of objective column: {network.snapshot_weightings['objective'].sum()}")
238
-
239
- if hasattr(network, 'investment_period_weightings'):
316
+ logger.info(
317
+ f"[DEBUG] Unique values in objective column: {network.snapshot_weightings['objective'].unique()}"
318
+ )
319
+ logger.info(
320
+ f"[DEBUG] Sum of objective column: {network.snapshot_weightings['objective'].sum()}"
321
+ )
322
+
323
+ if hasattr(network, "investment_period_weightings"):
240
324
  logger.info(f"[DEBUG] investment_period_weightings exists:")
241
- logger.info(f"[DEBUG] Type: {type(network.investment_period_weightings)}")
242
- logger.info(f"[DEBUG] Content:\n{network.investment_period_weightings}")
243
-
325
+ logger.info(
326
+ f"[DEBUG] Type: {type(network.investment_period_weightings)}"
327
+ )
328
+ logger.info(
329
+ f"[DEBUG] Content:\n{network.investment_period_weightings}"
330
+ )
331
+
244
332
  # Extract solve results with comprehensive statistics
245
- solve_result = self._extract_solve_results(network, result, solve_time, actual_solver_name, run_id)
246
-
333
+ solve_result = self._extract_solve_results(
334
+ network, result, solve_time, actual_solver_name, run_id
335
+ )
336
+
247
337
  # Calculate comprehensive network statistics (all years combined)
248
- if solve_result.get('success'):
338
+ if solve_result.get("success"):
249
339
  logger.info("Calculating comprehensive network statistics...")
250
- network_statistics = self._calculate_comprehensive_network_statistics(network, solve_time, actual_solver_name)
251
- solve_result['network_statistics'] = network_statistics
252
-
340
+ network_statistics = self._calculate_comprehensive_network_statistics(
341
+ network, solve_time, actual_solver_name
342
+ )
343
+ solve_result["network_statistics"] = network_statistics
344
+
253
345
  # Calculate year-based statistics for capacity expansion analysis
254
346
  logger.info("Calculating year-based statistics...")
255
- year_statistics = self._calculate_statistics_by_year(network, solve_time, actual_solver_name)
256
- solve_result['year_statistics'] = year_statistics
257
- solve_result['year_statistics_available'] = len(year_statistics) > 0
258
-
259
- logger.info(f"Solve completed in {solve_time:.2f} seconds with status: {solve_result['status']}")
347
+ year_statistics = self._calculate_statistics_by_year(
348
+ network, solve_time, actual_solver_name
349
+ )
350
+ solve_result["year_statistics"] = year_statistics
351
+ solve_result["year_statistics_available"] = len(year_statistics) > 0
352
+
353
+ logger.info(
354
+ f"Solve completed in {solve_time:.2f} seconds with status: {solve_result['status']}"
355
+ )
260
356
  logger.info(f"PyPSA result object: {result}")
261
- logger.info(f"PyPSA result status: {getattr(result, 'status', 'no status attr')}")
262
- logger.info(f"Network objective: {getattr(network, 'objective', 'no objective')}")
357
+ logger.info(
358
+ f"PyPSA result status: {getattr(result, 'status', 'no status attr')}"
359
+ )
360
+ logger.info(
361
+ f"Network objective: {getattr(network, 'objective', 'no objective')}"
362
+ )
263
363
  logger.info(f"Solve result success: {solve_result.get('success')}")
264
-
364
+
265
365
  return solve_result
266
-
366
+
267
367
  except Exception as e:
268
368
  solve_time = time.time() - start_time
269
369
  logger.error(f"Solve failed after {solve_time:.2f} seconds: {e}")
270
370
  logger.exception("Full solve error traceback:")
271
-
371
+
272
372
  return {
273
373
  "success": False,
274
374
  "status": "failed",
275
375
  "error": str(e),
276
376
  "solve_time": solve_time,
277
- "solver_name": actual_solver_name if 'actual_solver_name' in locals() else solver_name,
377
+ "solver_name": (
378
+ actual_solver_name
379
+ if "actual_solver_name" in locals()
380
+ else solver_name
381
+ ),
278
382
  "run_id": run_id,
279
- "objective_value": None
383
+ "objective_value": None,
280
384
  }
281
-
282
- def _get_solver_config(self, solver_name: str, solver_options: Optional[Dict[str, Any]] = None,
283
- custom_solver_config: Optional[Dict[str, Any]] = None) -> tuple[str, Optional[Dict[str, Any]]]:
385
+
386
+ def _get_solver_config(
387
+ self,
388
+ solver_name: str,
389
+ solver_options: Optional[Dict[str, Any]] = None,
390
+ custom_solver_config: Optional[Dict[str, Any]] = None,
391
+ ) -> tuple[str, Optional[Dict[str, Any]]]:
284
392
  """
285
393
  Get the actual solver name and options for special solver configurations.
286
-
394
+
287
395
  Args:
288
396
  solver_name: The solver name (e.g., 'gurobi (barrier)', 'highs', 'custom')
289
397
  solver_options: Optional additional solver options
290
398
  custom_solver_config: Optional custom solver configuration for solver_name='custom'
291
399
  Format: {"solver": "actual_solver_name", "solver_options": {...}}
292
-
400
+
293
401
  Returns:
294
402
  Tuple of (actual_solver_name, solver_options_dict)
295
403
  """
296
404
  # Handle "custom" solver with custom configuration
297
- if solver_name == 'custom':
405
+ if solver_name == "custom":
298
406
  if not custom_solver_config:
299
- raise ValueError("custom_solver_config must be provided when solver_name='custom'")
300
-
301
- if 'solver' not in custom_solver_config:
302
- raise ValueError("custom_solver_config must contain 'solver' key with the actual solver name")
303
-
304
- actual_solver = custom_solver_config['solver']
305
- custom_options = custom_solver_config.get('solver_options', {})
306
-
407
+ raise ValueError(
408
+ "custom_solver_config must be provided when solver_name='custom'"
409
+ )
410
+
411
+ if "solver" not in custom_solver_config:
412
+ raise ValueError(
413
+ "custom_solver_config must contain 'solver' key with the actual solver name"
414
+ )
415
+
416
+ actual_solver = custom_solver_config["solver"]
417
+ custom_options = custom_solver_config.get("solver_options", {})
418
+
307
419
  # Merge with any additional solver_options passed separately
308
420
  if solver_options:
309
- merged_options = {'solver_options': {**custom_options, **solver_options}}
421
+ merged_options = {
422
+ "solver_options": {**custom_options, **solver_options}
423
+ }
310
424
  else:
311
- merged_options = {'solver_options': custom_options} if custom_options else None
312
-
313
- logger.info(f"Using custom solver configuration: {actual_solver} with options: {custom_options}")
425
+ merged_options = (
426
+ {"solver_options": custom_options} if custom_options else None
427
+ )
428
+
429
+ logger.info(
430
+ f"Using custom solver configuration: {actual_solver} with options: {custom_options}"
431
+ )
314
432
  return actual_solver, merged_options
315
-
433
+
316
434
  # Handle "default" solver
317
- if solver_name == 'default':
435
+ if solver_name == "default":
318
436
  # Try to read user's default solver preference
319
437
  actual_solver = self._resolve_default_solver()
320
438
  logger.info(f"Resolved 'default' solver to: {actual_solver}")
321
439
  return actual_solver, solver_options
322
-
440
+
323
441
  # Handle special Gurobi configurations
324
- if solver_name == 'gurobi (barrier)':
442
+ if solver_name == "gurobi (barrier)":
325
443
  gurobi_barrier_options = {
326
- 'solver_options': {
327
- 'Method': 2, # Barrier
328
- 'Crossover': 0, # Skip crossover
329
- 'MIPGap': 0.05, # 5% gap
330
- 'Threads': 0, # Use all cores (0 = auto)
331
- 'Presolve': 2, # Aggressive presolve
332
- 'ConcurrentMIP': 1, # Parallel root strategies
333
- 'BarConvTol': 1e-4, # Relaxed barrier convergence
334
- 'FeasibilityTol': 1e-5,
335
- 'OptimalityTol': 1e-5,
336
- 'NumericFocus': 1, # Improve stability
337
- 'PreSparsify': 1,
444
+ "solver_options": {
445
+ "Method": 2, # Barrier
446
+ "Crossover": 0, # Skip crossover
447
+ "MIPGap": 0.05, # 5% gap
448
+ "Threads": 0, # Use all cores (0 = auto)
449
+ "Presolve": 2, # Aggressive presolve
450
+ "ConcurrentMIP": 1, # Parallel root strategies
451
+ "BarConvTol": 1e-4, # Relaxed barrier convergence
452
+ "FeasibilityTol": 1e-5,
453
+ "OptimalityTol": 1e-5,
454
+ "NumericFocus": 1, # Improve stability
455
+ "PreSparsify": 1,
338
456
  }
339
457
  }
340
458
  # Merge with any additional options
341
459
  if solver_options:
342
460
  gurobi_barrier_options.update(solver_options)
343
- return 'gurobi', gurobi_barrier_options
461
+ return "gurobi", gurobi_barrier_options
344
462
 
345
- elif solver_name == 'gurobi (barrier homogeneous)':
463
+ elif solver_name == "gurobi (barrier homogeneous)":
346
464
  gurobi_barrier_homogeneous_options = {
347
- 'solver_options': {
348
- 'Method': 2, # Barrier
349
- 'Crossover': 0, # Skip crossover
350
- 'MIPGap': 0.05,
351
- 'Threads': 0, # Use all cores (0 = auto)
352
- 'Presolve': 2,
353
- 'ConcurrentMIP': 1,
354
- 'BarConvTol': 1e-4,
355
- 'FeasibilityTol': 1e-5,
356
- 'OptimalityTol': 1e-5,
357
- 'NumericFocus': 1,
358
- 'PreSparsify': 1,
359
- 'BarHomogeneous': 1, # Enable homogeneous barrier algorithm
465
+ "solver_options": {
466
+ "Method": 2, # Barrier
467
+ "Crossover": 0, # Skip crossover
468
+ "MIPGap": 0.05,
469
+ "Threads": 0, # Use all cores (0 = auto)
470
+ "Presolve": 2,
471
+ "ConcurrentMIP": 1,
472
+ "BarConvTol": 1e-4,
473
+ "FeasibilityTol": 1e-5,
474
+ "OptimalityTol": 1e-5,
475
+ "NumericFocus": 1,
476
+ "PreSparsify": 1,
477
+ "BarHomogeneous": 1, # Enable homogeneous barrier algorithm
360
478
  }
361
479
  }
362
480
  if solver_options:
363
481
  gurobi_barrier_homogeneous_options.update(solver_options)
364
- return 'gurobi', gurobi_barrier_homogeneous_options
482
+ return "gurobi", gurobi_barrier_homogeneous_options
365
483
 
366
- elif solver_name == 'gurobi (barrier+crossover balanced)':
484
+ elif solver_name == "gurobi (barrier+crossover balanced)":
367
485
  gurobi_options_balanced = {
368
- 'solver_options': {
369
- 'Method': 2,
370
- 'Crossover': 1, # Dual crossover
371
- 'MIPGap': 0.01,
372
- 'Threads': 0, # Use all cores (0 = auto)
373
- 'Presolve': 2,
374
- 'Heuristics': 0.1,
375
- 'Cuts': 2,
376
- 'ConcurrentMIP': 1,
377
- 'BarConvTol': 1e-6,
378
- 'FeasibilityTol': 1e-6,
379
- 'OptimalityTol': 1e-6,
380
- 'NumericFocus': 1,
381
- 'PreSparsify': 1,
486
+ "solver_options": {
487
+ "Method": 2,
488
+ "Crossover": 1, # Dual crossover
489
+ "MIPGap": 0.01,
490
+ "Threads": 0, # Use all cores (0 = auto)
491
+ "Presolve": 2,
492
+ "Heuristics": 0.1,
493
+ "Cuts": 2,
494
+ "ConcurrentMIP": 1,
495
+ "BarConvTol": 1e-6,
496
+ "FeasibilityTol": 1e-6,
497
+ "OptimalityTol": 1e-6,
498
+ "NumericFocus": 1,
499
+ "PreSparsify": 1,
382
500
  }
383
501
  }
384
502
  if solver_options:
385
503
  gurobi_options_balanced.update(solver_options)
386
504
  logger.info(f"Using Gurobi Barrier+Dual Crossover Balanced configuration")
387
- return 'gurobi', gurobi_options_balanced
505
+ return "gurobi", gurobi_options_balanced
388
506
 
389
- elif solver_name == 'gurobi (dual simplex)':
507
+ elif solver_name == "gurobi (dual simplex)":
390
508
  gurobi_dual_options = {
391
- 'solver_options': {
392
- 'Method': 1, # Dual simplex method
393
- 'Threads': 0, # Use all available cores
394
- 'Presolve': 2, # Aggressive presolve
509
+ "solver_options": {
510
+ "Method": 1, # Dual simplex method
511
+ "Threads": 0, # Use all available cores
512
+ "Presolve": 2, # Aggressive presolve
395
513
  }
396
514
  }
397
515
  if solver_options:
398
516
  gurobi_dual_options.update(solver_options)
399
- return 'gurobi', gurobi_dual_options
400
-
517
+ return "gurobi", gurobi_dual_options
518
+
401
519
  # Handle special Mosek configurations
402
- elif solver_name == 'mosek (default)':
520
+ elif solver_name == "mosek (default)":
403
521
  # No custom options - let Mosek use its default configuration
404
522
  mosek_default_options = {
405
- 'solver_options': {
406
- 'MSK_DPAR_MIO_REL_GAP_CONST': 0.05, # MIP relative gap tolerance (5% to match Gurobi)
407
- 'MSK_IPAR_MIO_MAX_TIME': 36000, # Max time 1 hour
523
+ "solver_options": {
524
+ "MSK_DPAR_MIO_REL_GAP_CONST": 0.05, # MIP relative gap tolerance (5% to match Gurobi)
525
+ "MSK_IPAR_MIO_MAX_TIME": 36000, # Max time 1 hour
408
526
  }
409
527
  }
410
528
  if solver_options:
411
- mosek_default_options['solver_options'].update(solver_options)
412
- logger.info(f"Using Mosek with default configuration (auto-select optimizer) and moderate MIP strategies")
413
- return 'mosek', mosek_default_options
414
-
415
- elif solver_name == 'mosek (barrier)':
529
+ mosek_default_options["solver_options"].update(solver_options)
530
+ logger.info(
531
+ f"Using Mosek with default configuration (auto-select optimizer) and moderate MIP strategies"
532
+ )
533
+ return "mosek", mosek_default_options
534
+
535
+ elif solver_name == "mosek (barrier)":
416
536
  mosek_barrier_options = {
417
- 'solver_options': {
418
- 'MSK_IPAR_INTPNT_BASIS': 0, # Skip crossover (barrier-only) - 0 = MSK_BI_NEVER
419
- 'MSK_DPAR_INTPNT_TOL_REL_GAP': 1e-4, # Match Gurobi barrier tolerance
420
- 'MSK_DPAR_INTPNT_TOL_PFEAS': 1e-5, # Match Gurobi primal feasibility
421
- 'MSK_DPAR_INTPNT_TOL_DFEAS': 1e-5, # Match Gurobi dual feasibility
537
+ "solver_options": {
538
+ "MSK_IPAR_INTPNT_BASIS": 0, # Skip crossover (barrier-only) - 0 = MSK_BI_NEVER
539
+ "MSK_DPAR_INTPNT_TOL_REL_GAP": 1e-4, # Match Gurobi barrier tolerance
540
+ "MSK_DPAR_INTPNT_TOL_PFEAS": 1e-5, # Match Gurobi primal feasibility
541
+ "MSK_DPAR_INTPNT_TOL_DFEAS": 1e-5, # Match Gurobi dual feasibility
422
542
  # Removed MSK_DPAR_INTPNT_TOL_INFEAS - was 1000x tighter than other tolerances!
423
- 'MSK_IPAR_NUM_THREADS': 0, # Use all available cores (0 = auto)
424
- 'MSK_IPAR_PRESOLVE_USE': 2, # Aggressive presolve (match Gurobi Presolve=2)
425
- 'MSK_IPAR_PRESOLVE_LINDEP_USE': 1, # Linear dependency check
426
- 'MSK_DPAR_MIO_REL_GAP_CONST': 0.05, # Match Gurobi 5% MIP gap
427
- 'MSK_IPAR_MIO_NODE_OPTIMIZER': 4, # Use interior-point for MIP nodes
428
- 'MSK_IPAR_MIO_ROOT_OPTIMIZER': 4, # Use interior-point for MIP root
429
- 'MSK_DPAR_MIO_MAX_TIME': 36000, # Max time 10 hour
543
+ "MSK_IPAR_NUM_THREADS": 0, # Use all available cores (0 = auto)
544
+ "MSK_IPAR_PRESOLVE_USE": 2, # Aggressive presolve (match Gurobi Presolve=2)
545
+ "MSK_DPAR_MIO_REL_GAP_CONST": 0.05, # Match Gurobi 5% MIP gap
546
+ "MSK_IPAR_MIO_ROOT_OPTIMIZER": 4, # Use interior-point for MIP root
547
+ "MSK_DPAR_MIO_MAX_TIME": 36000, # Max time 10 hour
430
548
  }
431
549
  }
432
550
  if solver_options:
433
- mosek_barrier_options['solver_options'].update(solver_options)
434
- logger.info(f"Using Mosek Barrier with aggressive presolve and relaxed tolerances")
435
- return 'mosek', mosek_barrier_options
436
-
437
- elif solver_name == 'mosek (barrier+crossover)':
551
+ mosek_barrier_options["solver_options"].update(solver_options)
552
+ logger.info(
553
+ f"Using Mosek Barrier with aggressive presolve and relaxed tolerances"
554
+ )
555
+ return "mosek", mosek_barrier_options
556
+
557
+ elif solver_name == "mosek (barrier+crossover)":
438
558
  mosek_barrier_crossover_options = {
439
- 'solver_options': {
440
- 'MSK_IPAR_INTPNT_BASIS': 1, # Always crossover (1 = MSK_BI_ALWAYS)
441
- 'MSK_DPAR_INTPNT_TOL_REL_GAP': 1e-4, # Match Gurobi barrier tolerance (was 1e-6)
442
- 'MSK_DPAR_INTPNT_TOL_PFEAS': 1e-5, # Match Gurobi (was 1e-6)
443
- 'MSK_DPAR_INTPNT_TOL_DFEAS': 1e-5, # Match Gurobi (was 1e-6)
444
- 'MSK_IPAR_NUM_THREADS': 0, # Use all available cores (0 = auto)
445
- 'MSK_DPAR_MIO_REL_GAP_CONST': 0.05, # Match Gurobi 5% MIP gap (was 1e-6)
446
- 'MSK_IPAR_MIO_ROOT_OPTIMIZER': 4, # Use interior-point for MIP root
447
- 'MSK_DPAR_MIO_MAX_TIME': 36000, # Max time 10 hour (safety limit)
559
+ "solver_options": {
560
+ "MSK_IPAR_INTPNT_BASIS": 1, # Always crossover (1 = MSK_BI_ALWAYS)
561
+ "MSK_DPAR_INTPNT_TOL_REL_GAP": 1e-4, # Match Gurobi barrier tolerance (was 1e-6)
562
+ "MSK_DPAR_INTPNT_TOL_PFEAS": 1e-5, # Match Gurobi (was 1e-6)
563
+ "MSK_DPAR_INTPNT_TOL_DFEAS": 1e-5, # Match Gurobi (was 1e-6)
564
+ "MSK_IPAR_NUM_THREADS": 0, # Use all available cores (0 = auto)
565
+ "MSK_DPAR_MIO_REL_GAP_CONST": 0.05, # Match Gurobi 5% MIP gap (was 1e-6)
566
+ "MSK_IPAR_MIO_ROOT_OPTIMIZER": 4, # Use interior-point for MIP root
567
+ "MSK_DPAR_MIO_MAX_TIME": 36000, # Max time 10 hour (safety limit)
448
568
  }
449
569
  }
450
570
  if solver_options:
451
- mosek_barrier_crossover_options['solver_options'].update(solver_options)
452
- logger.info(f"Using Mosek Barrier+Crossover configuration with Gurobi-matched tolerances and moderate MIP strategies")
453
- return 'mosek', mosek_barrier_crossover_options
454
-
455
- elif solver_name == 'mosek (dual simplex)':
456
- mosek_dual_options = {
457
- 'solver_options': {
458
- 'MSK_IPAR_NUM_THREADS': 0, # Use all available cores (0 = automatic)
459
- 'MSK_IPAR_PRESOLVE_USE': 1, # Force presolve
460
- 'MSK_DPAR_MIO_REL_GAP_CONST': 0.05, # Match Gurobi 5% MIP gap (was 1e-6)
461
- 'MSK_IPAR_MIO_NODE_OPTIMIZER': 1, # Use dual simplex for MIP nodes (1 = MSK_OPTIMIZER_DUAL_SIMPLEX)
462
- 'MSK_IPAR_MIO_ROOT_OPTIMIZER': 1, # Use dual simplex for MIP root
463
- 'MSK_DPAR_MIO_MAX_TIME': 36000, # Max time 10 hour (safety limit)
571
+ mosek_barrier_crossover_options["solver_options"].update(solver_options)
572
+ logger.info(
573
+ f"Using Mosek Barrier+Crossover configuration with Gurobi-matched tolerances and moderate MIP strategies"
574
+ )
575
+ return "mosek", mosek_barrier_crossover_options
464
576
 
577
+ elif solver_name == "mosek (dual simplex)":
578
+ mosek_dual_options = {
579
+ "solver_options": {
580
+ "MSK_IPAR_NUM_THREADS": 0, # Use all available cores (0 = automatic)
581
+ "MSK_IPAR_PRESOLVE_USE": 1, # Force presolve
582
+ "MSK_DPAR_MIO_REL_GAP_CONST": 0.05, # Match Gurobi 5% MIP gap (was 1e-6)
583
+ "MSK_IPAR_MIO_ROOT_OPTIMIZER": 1, # Use dual simplex for MIP root
584
+ "MSK_DPAR_MIO_MAX_TIME": 36000, # Max time 10 hour (safety limit)
465
585
  }
466
586
  }
467
587
  if solver_options:
468
- mosek_dual_options['solver_options'].update(solver_options)
469
- logger.info(f"Using Mosek Dual Simplex configuration with Gurobi-matched tolerances and moderate MIP strategies")
470
- return 'mosek', mosek_dual_options
471
-
588
+ mosek_dual_options["solver_options"].update(solver_options)
589
+ logger.info(
590
+ f"Using Mosek Dual Simplex configuration with Gurobi-matched tolerances and moderate MIP strategies"
591
+ )
592
+ return "mosek", mosek_dual_options
593
+
472
594
  # Check if this is a known valid solver name
473
- elif solver_name == 'mosek':
595
+ elif solver_name == "mosek":
474
596
  # Add default MILP-friendly settings for plain Mosek
475
597
  mosek_defaults = {
476
- 'solver_options': {
477
- 'MSK_DPAR_MIO_REL_GAP_CONST': 0.05, # Match Gurobi 5% MIP gap (was 1e-4)
478
- 'MSK_IPAR_MIO_MAX_TIME': 36000, # Max time 1 hour
479
- 'MSK_IPAR_NUM_THREADS': 0, # Use all cores (0 = auto)
598
+ "solver_options": {
599
+ "MSK_DPAR_MIO_REL_GAP_CONST": 0.05, # Match Gurobi 5% MIP gap (was 1e-4)
600
+ "MSK_IPAR_MIO_MAX_TIME": 36000, # Max time 1 hour
601
+ "MSK_IPAR_NUM_THREADS": 0, # Use all cores (0 = auto)
480
602
  }
481
603
  }
482
604
  if solver_options:
483
- mosek_defaults['solver_options'].update(solver_options)
484
- logger.info(f"Using Mosek with barrier method for MIP (interior-point for root/nodes)")
605
+ mosek_defaults["solver_options"].update(solver_options)
606
+ logger.info(
607
+ f"Using Mosek with barrier method for MIP (interior-point for root/nodes)"
608
+ )
485
609
  return solver_name, mosek_defaults
486
-
487
- elif solver_name == 'gurobi':
610
+
611
+ elif solver_name == "gurobi":
488
612
  # Add default MILP-friendly settings for plain Gurobi (for consistency)
489
613
  gurobi_defaults = {
490
- 'solver_options': {
491
- 'MIPGap': 1e-4, # 0.01% gap
492
- 'TimeLimit': 3600, # 1 hour
493
- 'Threads': 0, # Use all cores
494
- 'OutputFlag': 1, # Enable output
614
+ "solver_options": {
615
+ "MIPGap": 1e-4, # 0.01% gap
616
+ "TimeLimit": 3600, # 1 hour
617
+ "Threads": 0, # Use all cores
618
+ "OutputFlag": 1, # Enable output
495
619
  }
496
620
  }
497
621
  if solver_options:
498
- gurobi_defaults['solver_options'].update(solver_options)
622
+ gurobi_defaults["solver_options"].update(solver_options)
499
623
  logger.info(f"Using Gurobi with default MILP-friendly settings")
500
624
  return solver_name, gurobi_defaults
501
-
625
+
502
626
  # Handle special COPT configurations
503
- elif solver_name == 'copt (barrier)':
627
+ elif solver_name == "copt (barrier)":
504
628
  copt_barrier_options = {
505
- 'solver_options': {
506
- 'LpMethod': 2, # Barrier method
507
- 'Crossover': 0, # Skip crossover for speed
508
- 'RelGap': 0.05, # 5% MIP gap (match Gurobi)
509
- 'TimeLimit': 7200, # 1 hour time limit
510
- 'Threads': -1, # 4 threads (memory-conscious)
511
- 'Presolve': 3, # Aggressive presolve
512
- 'Scaling': 1, # Enable scaling
513
- 'FeasTol': 1e-5, # Match Gurobi feasibility
514
- 'DualTol': 1e-5, # Match Gurobi dual tolerance
629
+ "solver_options": {
630
+ "LpMethod": 2, # Barrier method
631
+ "Crossover": 0, # Skip crossover for speed
632
+ "RelGap": 0.05, # 5% MIP gap (match Gurobi)
633
+ "TimeLimit": 7200, # 1 hour time limit
634
+ "Threads": -1, # 4 threads (memory-conscious)
635
+ "Presolve": 3, # Aggressive presolve
636
+ "Scaling": 1, # Enable scaling
637
+ "FeasTol": 1e-5, # Match Gurobi feasibility
638
+ "DualTol": 1e-5, # Match Gurobi dual tolerance
515
639
  # MIP performance settings
516
- 'CutLevel': 2, # Normal cut generation
517
- 'HeurLevel': 3, # Aggressive heuristics
518
- 'StrongBranching': 1, # Fast strong branching
640
+ "CutLevel": 2, # Normal cut generation
641
+ "HeurLevel": 3, # Aggressive heuristics
642
+ "StrongBranching": 1, # Fast strong branching
519
643
  }
520
644
  }
521
645
  if solver_options:
522
- copt_barrier_options['solver_options'].update(solver_options)
523
- logger.info(f"Using COPT Barrier configuration (fast interior-point method)")
524
- return 'copt', copt_barrier_options
525
-
526
- elif solver_name == 'copt (barrier homogeneous)':
646
+ copt_barrier_options["solver_options"].update(solver_options)
647
+ logger.info(
648
+ f"Using COPT Barrier configuration (fast interior-point method)"
649
+ )
650
+ return "copt", copt_barrier_options
651
+
652
+ elif solver_name == "copt (barrier homogeneous)":
527
653
  copt_barrier_homogeneous_options = {
528
- 'solver_options': {
529
- 'LpMethod': 2, # Barrier method
530
- 'Crossover': 0, # Skip crossover
531
- 'BarHomogeneous': 1, # Use homogeneous self-dual form
532
- 'RelGap': 0.05, # 5% MIP gap
533
- 'TimeLimit': 3600, # 1 hour
534
- 'Threads': -1, # 4 threads (memory-conscious)
535
- 'Presolve': 3, # Aggressive presolve
536
- 'Scaling': 1, # Enable scaling
537
- 'FeasTol': 1e-5,
538
- 'DualTol': 1e-5,
654
+ "solver_options": {
655
+ "LpMethod": 2, # Barrier method
656
+ "Crossover": 0, # Skip crossover
657
+ "BarHomogeneous": 1, # Use homogeneous self-dual form
658
+ "RelGap": 0.05, # 5% MIP gap
659
+ "TimeLimit": 3600, # 1 hour
660
+ "Threads": -1, # 4 threads (memory-conscious)
661
+ "Presolve": 3, # Aggressive presolve
662
+ "Scaling": 1, # Enable scaling
663
+ "FeasTol": 1e-5,
664
+ "DualTol": 1e-5,
539
665
  # MIP performance settings
540
- 'CutLevel': 2, # Normal cuts
541
- 'HeurLevel': 3, # Aggressive heuristics
542
- 'StrongBranching': 1, # Fast strong branching
666
+ "CutLevel": 2, # Normal cuts
667
+ "HeurLevel": 3, # Aggressive heuristics
668
+ "StrongBranching": 1, # Fast strong branching
543
669
  }
544
670
  }
545
671
  if solver_options:
546
- copt_barrier_homogeneous_options['solver_options'].update(solver_options)
672
+ copt_barrier_homogeneous_options["solver_options"].update(
673
+ solver_options
674
+ )
547
675
  logger.info(f"Using COPT Barrier Homogeneous configuration")
548
- return 'copt', copt_barrier_homogeneous_options
549
-
550
- elif solver_name == 'copt (barrier+crossover)':
676
+ return "copt", copt_barrier_homogeneous_options
677
+
678
+ elif solver_name == "copt (barrier+crossover)":
551
679
  copt_barrier_crossover_options = {
552
- 'solver_options': {
553
- 'LpMethod': 2, # Barrier method
554
- 'Crossover': 1, # Enable crossover for better solutions
555
- 'RelGap': 0.05, # 5% MIP gap (relaxed for faster solves)
556
- 'TimeLimit': 36000, # 10 hour
557
- 'Threads': -1, # Use all cores
558
- 'Presolve': 2, # Aggressive presolve
559
- 'Scaling': 1, # Enable scaling
560
- 'FeasTol': 1e-4, # Tighter feasibility
561
- 'DualTol': 1e-4, # Tighter dual tolerance
680
+ "solver_options": {
681
+ "LpMethod": 2, # Barrier method
682
+ "Crossover": 1, # Enable crossover for better solutions
683
+ "RelGap": 0.05, # 5% MIP gap (relaxed for faster solves)
684
+ "TimeLimit": 36000, # 10 hour
685
+ "Threads": -1, # Use all cores
686
+ "Presolve": 2, # Aggressive presolve
687
+ "Scaling": 1, # Enable scaling
688
+ "FeasTol": 1e-4, # Tighter feasibility
689
+ "DualTol": 1e-4, # Tighter dual tolerance
562
690
  }
563
691
  }
564
692
  if solver_options:
565
- copt_barrier_crossover_options['solver_options'].update(solver_options)
566
- logger.info(f"Using COPT Barrier+Crossover configuration (balanced performance)")
567
- return 'copt', copt_barrier_crossover_options
568
-
569
- elif solver_name == 'copt (dual simplex)':
693
+ copt_barrier_crossover_options["solver_options"].update(solver_options)
694
+ logger.info(
695
+ f"Using COPT Barrier+Crossover configuration (balanced performance)"
696
+ )
697
+ return "copt", copt_barrier_crossover_options
698
+
699
+ elif solver_name == "copt (dual simplex)":
570
700
  copt_dual_simplex_options = {
571
- 'solver_options': {
572
- 'LpMethod': 1, # Dual simplex method
573
- 'RelGap': 0.05, # 5% MIP gap
574
- 'TimeLimit': 3600, # 1 hour
575
- 'Threads': -1, # Use all cores
576
- 'Presolve': 3, # Aggressive presolve
577
- 'Scaling': 1, # Enable scaling
578
- 'FeasTol': 1e-6,
579
- 'DualTol': 1e-6,
701
+ "solver_options": {
702
+ "LpMethod": 1, # Dual simplex method
703
+ "RelGap": 0.05, # 5% MIP gap
704
+ "TimeLimit": 3600, # 1 hour
705
+ "Threads": -1, # Use all cores
706
+ "Presolve": 3, # Aggressive presolve
707
+ "Scaling": 1, # Enable scaling
708
+ "FeasTol": 1e-6,
709
+ "DualTol": 1e-6,
580
710
  # MIP performance settings
581
- 'CutLevel': 2, # Normal cuts
582
- 'HeurLevel': 2, # Normal heuristics
583
- 'StrongBranching': 1, # Fast strong branching
711
+ "CutLevel": 2, # Normal cuts
712
+ "HeurLevel": 2, # Normal heuristics
713
+ "StrongBranching": 1, # Fast strong branching
584
714
  }
585
715
  }
586
716
  if solver_options:
587
- copt_dual_simplex_options['solver_options'].update(solver_options)
717
+ copt_dual_simplex_options["solver_options"].update(solver_options)
588
718
  logger.info(f"Using COPT Dual Simplex configuration (robust method)")
589
- return 'copt', copt_dual_simplex_options
590
-
591
- elif solver_name == 'copt (concurrent)':
719
+ return "copt", copt_dual_simplex_options
720
+
721
+ elif solver_name == "copt (concurrent)":
592
722
  copt_concurrent_options = {
593
- 'solver_options': {
594
- 'LpMethod': 4, # Concurrent (simplex + barrier)
595
- 'RelGap': 0.05, # 5% MIP gap
596
- 'TimeLimit': 3600, # 1 hour
597
- 'Threads': -1, # Use all cores
598
- 'Presolve': 3, # Aggressive presolve
599
- 'Scaling': 1, # Enable scaling
600
- 'FeasTol': 1e-5,
601
- 'DualTol': 1e-5,
723
+ "solver_options": {
724
+ "LpMethod": 4, # Concurrent (simplex + barrier)
725
+ "RelGap": 0.05, # 5% MIP gap
726
+ "TimeLimit": 3600, # 1 hour
727
+ "Threads": -1, # Use all cores
728
+ "Presolve": 3, # Aggressive presolve
729
+ "Scaling": 1, # Enable scaling
730
+ "FeasTol": 1e-5,
731
+ "DualTol": 1e-5,
602
732
  # MIP performance settings
603
- 'CutLevel': 2, # Normal cuts
604
- 'HeurLevel': 3, # Aggressive heuristics
605
- 'StrongBranching': 1, # Fast strong branching
733
+ "CutLevel": 2, # Normal cuts
734
+ "HeurLevel": 3, # Aggressive heuristics
735
+ "StrongBranching": 1, # Fast strong branching
606
736
  }
607
737
  }
608
738
  if solver_options:
609
- copt_concurrent_options['solver_options'].update(solver_options)
610
- logger.info(f"Using COPT Concurrent configuration (parallel simplex + barrier)")
611
- return 'copt', copt_concurrent_options
612
-
613
- elif solver_name in ['highs', 'cplex', 'glpk', 'cbc', 'scip', 'copt']:
739
+ copt_concurrent_options["solver_options"].update(solver_options)
740
+ logger.info(
741
+ f"Using COPT Concurrent configuration (parallel simplex + barrier)"
742
+ )
743
+ return "copt", copt_concurrent_options
744
+
745
+ elif solver_name in ["highs", "cplex", "glpk", "cbc", "scip", "copt"]:
614
746
  return solver_name, solver_options
615
-
747
+
616
748
  else:
617
749
  # Unknown solver name - log warning and fall back to highs
618
- logger.warning(f"Unknown solver name '{solver_name}' - falling back to 'highs'")
619
- return 'highs', solver_options
620
-
621
-
750
+ logger.warning(
751
+ f"Unknown solver name '{solver_name}' - falling back to 'highs'"
752
+ )
753
+ return "highs", solver_options
754
+
622
755
  def _detect_constraint_type(self, constraint_code: str) -> str:
623
756
  """
624
757
  Detect if constraint is network-modification or model-constraint type.
625
-
758
+
626
759
  Args:
627
760
  constraint_code: The constraint code to analyze
628
-
761
+
629
762
  Returns:
630
763
  "model_constraint" or "network_modification"
631
764
  """
632
765
  # Type 2 indicators (model constraints) - need access to optimization model
633
766
  model_indicators = [
634
- 'n.optimize.create_model()',
635
- 'm.variables',
636
- 'm.add_constraints',
637
- 'gen_p =',
638
- 'constraint_expr =',
639
- 'LinearExpression',
640
- 'linopy',
641
- 'Generator-p',
642
- 'lhs <=',
643
- 'constraint_expr ='
767
+ "n.optimize.create_model()",
768
+ "m.variables",
769
+ "m.add_constraints",
770
+ "gen_p =",
771
+ "constraint_expr =",
772
+ "LinearExpression",
773
+ "linopy",
774
+ "Generator-p",
775
+ "lhs <=",
776
+ "constraint_expr =",
644
777
  ]
645
-
778
+
646
779
  # Type 1 indicators (network modifications) - modify network directly
647
780
  network_indicators = [
648
- 'n.generators.loc',
649
- 'n.add(',
650
- 'n.buses.',
651
- 'n.lines.',
652
- 'network.generators.loc',
653
- 'network.add(',
654
- 'network.buses.',
655
- 'network.lines.'
781
+ "n.generators.loc",
782
+ "n.add(",
783
+ "n.buses.",
784
+ "n.lines.",
785
+ "network.generators.loc",
786
+ "network.add(",
787
+ "network.buses.",
788
+ "network.lines.",
656
789
  ]
657
-
790
+
658
791
  # Check for model constraint indicators first (more specific)
659
792
  if any(indicator in constraint_code for indicator in model_indicators):
660
793
  return "model_constraint"
@@ -664,170 +797,214 @@ class NetworkSolver:
664
797
  # Default to network_modification for safety (existing behavior)
665
798
  return "network_modification"
666
799
 
667
- def _create_extra_functionality(self, optimization_constraints: list, constraint_applicator) -> callable:
800
+ def _create_extra_functionality(
801
+ self, optimization_constraints: list, constraint_applicator
802
+ ) -> callable:
668
803
  """
669
804
  Create extra_functionality function for optimization-time constraints.
670
-
805
+
671
806
  This matches the old PyPSA solver's approach to applying constraints during optimization.
672
-
807
+
673
808
  Args:
674
809
  optimization_constraints: List of optimization constraint dictionaries
675
810
  constraint_applicator: ConstraintApplicator instance
676
-
811
+
677
812
  Returns:
678
813
  Function that can be passed to network.optimize(extra_functionality=...)
679
814
  """
815
+
680
816
  def extra_functionality(network, snapshots):
681
817
  """Apply optimization constraints during solve - matches old code structure"""
682
818
  try:
683
- logger.info(f"Applying {len(optimization_constraints)} optimization constraints during solve")
684
-
819
+ logger.info(
820
+ f"Applying {len(optimization_constraints)} optimization constraints during solve"
821
+ )
822
+
685
823
  # Apply each constraint in priority order
686
- sorted_constraints = sorted(optimization_constraints, key=lambda x: x.get('priority', 0))
687
-
824
+ sorted_constraints = sorted(
825
+ optimization_constraints, key=lambda x: x.get("priority", 0)
826
+ )
827
+
688
828
  for constraint in sorted_constraints:
689
829
  try:
690
- constraint_applicator.apply_optimization_constraint(network, snapshots, constraint)
830
+ constraint_applicator.apply_optimization_constraint(
831
+ network, snapshots, constraint
832
+ )
691
833
  except Exception as e:
692
- logger.error(f"Failed to apply optimization constraint {constraint.get('name', 'unknown')}: {e}")
834
+ logger.error(
835
+ f"Failed to apply optimization constraint {constraint.get('name', 'unknown')}: {e}"
836
+ )
693
837
  continue
694
-
838
+
695
839
  logger.info("Optimization constraints applied successfully")
696
-
840
+
697
841
  except Exception as e:
698
842
  logger.error(f"Failed to apply optimization constraints: {e}")
699
843
  # Don't re-raise - let optimization continue
700
-
844
+
701
845
  return extra_functionality
702
-
703
- def _set_snapshot_weightings_after_multiperiod(self, conn, network_id: int, network: 'pypsa.Network'):
704
- """Set snapshot weightings AFTER multi-period setup - matches old code approach."""
846
+
847
+ def _set_snapshot_weightings_after_multiperiod(
848
+ self, conn, network: "pypsa.Network"
849
+ ):
850
+ """Set snapshot weightings AFTER multi-period setup - matches old code approach (single network per database)."""
705
851
  try:
706
852
  from pyconvexity.models import get_network_time_periods, get_network_info
707
-
708
- time_periods = get_network_time_periods(conn, network_id)
853
+
854
+ time_periods = get_network_time_periods(conn)
709
855
  if time_periods and len(network.snapshots) > 0:
710
- logger.info(f"Setting snapshot weightings AFTER multi-period setup for {len(time_periods)} time periods")
711
-
712
- # Get network info to determine time interval (stored in networks table, not network_config)
713
- network_info = get_network_info(conn, network_id)
714
- time_interval = network_info.get('time_interval', '1H')
856
+ logger.info(
857
+ f"Setting snapshot weightings AFTER multi-period setup for {len(time_periods)} time periods"
858
+ )
859
+
860
+ # Get network info to determine time interval
861
+ network_info = get_network_info(conn)
862
+ time_interval = network_info.get("time_interval", "1H")
715
863
  weight = self._parse_time_interval(time_interval)
716
-
864
+
717
865
  if weight is None:
718
866
  weight = 1.0
719
- logger.warning(f"Could not parse time interval '{time_interval}', using default weight of 1.0")
720
-
721
- logger.info(f"Parsed time interval '{time_interval}' -> weight = {weight}")
722
-
867
+ logger.warning(
868
+ f"Could not parse time interval '{time_interval}', using default weight of 1.0"
869
+ )
870
+
871
+ logger.info(
872
+ f"Parsed time interval '{time_interval}' -> weight = {weight}"
873
+ )
874
+
723
875
  # Create weightings array - all snapshots get the same weight for this time resolution
724
876
  weightings = [weight] * len(time_periods)
725
-
877
+
726
878
  if len(weightings) == len(network.snapshots):
727
879
  # Set all three columns like the old code - critical for proper objective calculation
728
- network.snapshot_weightings.loc[:, 'objective'] = weightings
729
- network.snapshot_weightings.loc[:, 'generators'] = weightings
730
- network.snapshot_weightings.loc[:, 'stores'] = weightings
731
- logger.info(f"Set snapshot weightings AFTER multi-period setup: objective, generators, stores columns")
732
-
880
+ network.snapshot_weightings.loc[:, "objective"] = weightings
881
+ network.snapshot_weightings.loc[:, "generators"] = weightings
882
+ network.snapshot_weightings.loc[:, "stores"] = weightings
883
+ logger.info(
884
+ f"Set snapshot weightings AFTER multi-period setup: objective, generators, stores columns"
885
+ )
886
+
733
887
  # Debug logging like old code
734
- logger.info(f"Snapshot weightings shape: {network.snapshot_weightings.shape}")
735
- logger.info(f"Unique values in objective column: {network.snapshot_weightings['objective'].unique()}")
736
- logger.info(f"Sum of objective column: {network.snapshot_weightings['objective'].sum()}")
888
+ logger.info(
889
+ f"Snapshot weightings shape: {network.snapshot_weightings.shape}"
890
+ )
891
+ logger.info(
892
+ f"Unique values in objective column: {network.snapshot_weightings['objective'].unique()}"
893
+ )
894
+ logger.info(
895
+ f"Sum of objective column: {network.snapshot_weightings['objective'].sum()}"
896
+ )
737
897
  logger.info(f"Weight per snapshot: {weight} hours")
738
898
  else:
739
- logger.warning(f"Mismatch between weightings ({len(weightings)}) and snapshots ({len(network.snapshots)})")
899
+ logger.warning(
900
+ f"Mismatch between weightings ({len(weightings)}) and snapshots ({len(network.snapshots)})"
901
+ )
740
902
  except Exception as e:
741
- logger.warning(f"Failed to set snapshot weightings after multi-period setup: {e}")
903
+ logger.warning(
904
+ f"Failed to set snapshot weightings after multi-period setup: {e}"
905
+ )
742
906
  logger.exception("Full traceback:")
743
-
907
+
744
908
  def _parse_time_interval(self, time_interval: str) -> Optional[float]:
745
909
  """Parse time interval string to hours - handles multiple formats."""
746
910
  if not time_interval:
747
911
  return None
748
-
912
+
749
913
  try:
750
914
  # Clean up the string
751
915
  interval = time_interval.strip()
752
-
916
+
753
917
  # Handle ISO 8601 duration format (PT3H, PT30M, etc.)
754
- if interval.startswith('PT') and interval.endswith('H'):
918
+ if interval.startswith("PT") and interval.endswith("H"):
755
919
  # Extract hours (e.g., 'PT3H' -> 3.0)
756
920
  hours_str = interval[2:-1] # Remove 'PT' and 'H'
757
921
  return float(hours_str)
758
- elif interval.startswith('PT') and interval.endswith('M'):
922
+ elif interval.startswith("PT") and interval.endswith("M"):
759
923
  # Extract minutes (e.g., 'PT30M' -> 0.5)
760
924
  minutes_str = interval[2:-1] # Remove 'PT' and 'M'
761
925
  return float(minutes_str) / 60.0
762
- elif interval.startswith('PT') and interval.endswith('S'):
926
+ elif interval.startswith("PT") and interval.endswith("S"):
763
927
  # Extract seconds (e.g., 'PT3600S' -> 1.0)
764
928
  seconds_str = interval[2:-1] # Remove 'PT' and 'S'
765
929
  return float(seconds_str) / 3600.0
766
-
930
+
767
931
  # Handle simple frequency strings (3H, 2D, etc.)
768
- elif interval.endswith('H') or interval.endswith('h'):
932
+ elif interval.endswith("H") or interval.endswith("h"):
769
933
  hours_str = interval[:-1]
770
934
  return float(hours_str) if hours_str else 1.0
771
- elif interval.endswith('D') or interval.endswith('d'):
935
+ elif interval.endswith("D") or interval.endswith("d"):
772
936
  days_str = interval[:-1]
773
937
  return float(days_str) * 24 if days_str else 24.0
774
- elif interval.endswith('M') or interval.endswith('m'):
938
+ elif interval.endswith("M") or interval.endswith("m"):
775
939
  minutes_str = interval[:-1]
776
- return float(minutes_str) / 60.0 if minutes_str else 1.0/60.0
777
- elif interval.endswith('S') or interval.endswith('s'):
940
+ return float(minutes_str) / 60.0 if minutes_str else 1.0 / 60.0
941
+ elif interval.endswith("S") or interval.endswith("s"):
778
942
  seconds_str = interval[:-1]
779
- return float(seconds_str) / 3600.0 if seconds_str else 1.0/3600.0
780
-
943
+ return float(seconds_str) / 3600.0 if seconds_str else 1.0 / 3600.0
944
+
781
945
  # Try to parse as plain number (assume hours)
782
946
  else:
783
947
  return float(interval)
784
-
948
+
785
949
  except (ValueError, TypeError) as e:
786
950
  logger.warning(f"Could not parse time interval '{time_interval}': {e}")
787
951
  return None
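As a quick illustration of the interval formats this parser accepts, the hypothetical snippet below lists the hour values one would expect back from `_parse_time_interval` (a usage sketch only, assuming `NetworkSolver` from this module is in scope and PyPSA is installed so it can be constructed):

```python
# Hypothetical usage sketch: expected conversions for the supported formats.
solver = NetworkSolver()

examples = {
    "PT3H": 3.0,     # ISO 8601 duration in hours
    "PT30M": 0.5,    # ISO 8601 duration in minutes
    "PT3600S": 1.0,  # ISO 8601 duration in seconds
    "3H": 3.0,       # simple frequency string
    "2D": 48.0,      # days converted to hours
    "45M": 0.75,     # minutes converted to hours
    "1.5": 1.5,      # plain number, interpreted as hours
}

for text, expected_hours in examples.items():
    assert solver._parse_time_interval(text) == expected_hours
```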
788
-
789
- def _calculate_investment_weightings(self, network: 'pypsa.Network', discount_rate: float) -> None:
952
+
953
+ def _calculate_investment_weightings(
954
+ self, network: "pypsa.Network", discount_rate: float
955
+ ) -> None:
790
956
  """
791
957
  Calculate investment period weightings using discount rate - matching old PyPSA solver exactly.
792
-
958
+
793
959
  Args:
794
960
  network: PyPSA Network object
795
961
  discount_rate: Discount rate for NPV calculations
796
962
  """
797
963
  try:
798
964
  import pandas as pd
799
-
800
- if not hasattr(network, 'investment_periods') or len(network.investment_periods) == 0:
965
+
966
+ if (
967
+ not hasattr(network, "investment_periods")
968
+ or len(network.investment_periods) == 0
969
+ ):
801
970
  return
802
-
971
+
803
972
  years = network.investment_periods
804
973
  # Convert pandas Index to list for easier handling
805
- years_list = years.tolist() if hasattr(years, 'tolist') else list(years)
806
-
807
- logger.info(f"Calculating investment weightings for periods: {years_list} with discount rate: {discount_rate}")
808
-
974
+ years_list = years.tolist() if hasattr(years, "tolist") else list(years)
975
+
976
+ logger.info(
977
+ f"Calculating investment weightings for periods: {years_list} with discount rate: {discount_rate}"
978
+ )
979
+
809
980
  # For single year, use simple weighting of 1.0
810
981
  if len(years_list) == 1:
811
982
  # Single year case
812
- network.investment_period_weightings = pd.DataFrame({
813
- 'objective': pd.Series({years_list[0]: 1.0}),
814
- 'years': pd.Series({years_list[0]: 1})
815
- })
816
- logger.info(f"Set single-year investment period weightings for year {years_list[0]}")
983
+ network.investment_period_weightings = pd.DataFrame(
984
+ {
985
+ "objective": pd.Series({years_list[0]: 1.0}),
986
+ "years": pd.Series({years_list[0]: 1}),
987
+ }
988
+ )
989
+ logger.info(
990
+ f"Set single-year investment period weightings for year {years_list[0]}"
991
+ )
817
992
  else:
818
993
  # Multi-year case - EXACTLY match old code logic
819
994
  # Get unique years from the network snapshots to determine period lengths
820
- if hasattr(network.snapshots, 'year'):
995
+ if hasattr(network.snapshots, "year"):
821
996
  snapshot_years = sorted(network.snapshots.year.unique())
822
- elif hasattr(network.snapshots, 'get_level_values'):
997
+ elif hasattr(network.snapshots, "get_level_values"):
823
998
  # MultiIndex case - get years from 'period' level
824
- snapshot_years = sorted(network.snapshots.get_level_values('period').unique())
999
+ snapshot_years = sorted(
1000
+ network.snapshots.get_level_values("period").unique()
1001
+ )
825
1002
  else:
826
1003
  # Fallback: use investment periods as years
827
1004
  snapshot_years = years_list
828
-
1005
+
829
1006
  logger.info(f"Snapshot years found: {snapshot_years}")
830
-
1007
+
831
1008
  # Calculate years per period - EXACTLY matching old code
832
1009
  years_diff = []
833
1010
  for i, year in enumerate(years_list):
@@ -839,7 +1016,9 @@ class NetworkSolver:
839
1016
  # For the last period, calculate based on snapshot coverage
840
1017
  if snapshot_years:
841
1018
  # Find the last snapshot year that's >= current period year
842
- last_snapshot_year = max([y for y in snapshot_years if y >= year])
1019
+ last_snapshot_year = max(
1020
+ [y for y in snapshot_years if y >= year]
1021
+ )
843
1022
  period_years = last_snapshot_year - year + 1
844
1023
  else:
845
1024
  # Fallback: assume same length as previous period or 1
@@ -847,71 +1026,83 @@ class NetworkSolver:
847
1026
  period_years = years_diff[-1] # Same as previous period
848
1027
  else:
849
1028
  period_years = 1
850
-
1029
+
851
1030
  years_diff.append(period_years)
852
1031
  logger.info(f"Period {year}: {period_years} years")
853
-
1032
+
854
1033
  # Create weightings DataFrame with years column
855
- weightings_df = pd.DataFrame({
856
- 'years': pd.Series(years_diff, index=years_list)
857
- })
858
-
1034
+ weightings_df = pd.DataFrame(
1035
+ {"years": pd.Series(years_diff, index=years_list)}
1036
+ )
1037
+
859
1038
  # Calculate objective weightings with discount rate - EXACTLY matching old code
860
1039
  r = discount_rate
861
1040
  T = 0 # Cumulative time tracker
862
-
1041
+
863
1042
  logger.info(f"Calculating discount factors with rate {r}:")
864
1043
  for period, nyears in weightings_df.years.items():
865
1044
  # Calculate discount factors for each year in this period
866
1045
  discounts = [(1 / (1 + r) ** t) for t in range(T, T + nyears)]
867
1046
  period_weighting = sum(discounts)
868
1047
  weightings_df.at[period, "objective"] = period_weighting
869
-
870
- logger.info(f" Period {period}: years {T} to {T + nyears - 1}, discounts={[f'{d:.4f}' for d in discounts]}, sum={period_weighting:.4f}")
1048
+
1049
+ logger.info(
1050
+ f" Period {period}: years {T} to {T + nyears - 1}, discounts={[f'{d:.4f}' for d in discounts]}, sum={period_weighting:.4f}"
1051
+ )
871
1052
  T += nyears # Update cumulative time
872
-
1053
+
873
1054
  network.investment_period_weightings = weightings_df
874
1055
  logger.info(f"Final investment period weightings:")
875
1056
  logger.info(f" Years: {weightings_df['years'].to_dict()}")
876
1057
  logger.info(f" Objective: {weightings_df['objective'].to_dict()}")
877
-
1058
+
878
1059
  except Exception as e:
879
1060
  logger.error(f"Failed to calculate investment weightings: {e}")
880
1061
  logger.exception("Full traceback:")
881
-
882
-
883
- def _extract_solve_results(self, network: 'pypsa.Network', result: Any, solve_time: float, solver_name: str, run_id: str) -> Dict[str, Any]:
1062
+
1063
+ def _extract_solve_results(
1064
+ self,
1065
+ network: "pypsa.Network",
1066
+ result: Any,
1067
+ solve_time: float,
1068
+ solver_name: str,
1069
+ run_id: str,
1070
+ ) -> Dict[str, Any]:
884
1071
  """
885
1072
  Extract solve results from PyPSA network.
886
-
1073
+
887
1074
  Args:
888
1075
  network: Solved PyPSA Network object
889
1076
  result: PyPSA solve result
890
1077
  solve_time: Time taken to solve
891
1078
  solver_name: Name of solver used
892
1079
  run_id: Unique run identifier
893
-
1080
+
894
1081
  Returns:
895
1082
  Dictionary with solve results and metadata
896
1083
  """
897
1084
  try:
898
1085
  # Extract basic solve information
899
- status = getattr(result, 'status', 'unknown')
900
- objective_value = getattr(network, 'objective', None)
901
-
1086
+ status = getattr(result, "status", "unknown")
1087
+ objective_value = getattr(network, "objective", None)
1088
+
902
1089
  # Debug logging
903
- logger.info(f"Raw PyPSA result attributes: {dir(result) if result else 'None'}")
904
- if hasattr(result, 'termination_condition'):
1090
+ logger.info(
1091
+ f"Raw PyPSA result attributes: {dir(result) if result else 'None'}"
1092
+ )
1093
+ if hasattr(result, "termination_condition"):
905
1094
  logger.info(f"Termination condition: {result.termination_condition}")
906
- if hasattr(result, 'solver'):
1095
+ if hasattr(result, "solver"):
907
1096
  logger.info(f"Solver info: {result.solver}")
908
-
1097
+
909
1098
  # Convert PyPSA result to dictionary format
910
1099
  result_dict = self._convert_pypsa_result_to_dict(result)
911
-
1100
+
912
1101
  # Determine success based on multiple criteria
913
- success = self._determine_solve_success(result, network, status, objective_value)
914
-
1102
+ success = self._determine_solve_success(
1103
+ result, network, status, objective_value
1104
+ )
1105
+
915
1106
  solve_result = {
916
1107
  "success": success,
917
1108
  "status": status,
@@ -926,16 +1117,16 @@ class NetworkSolver:
926
1117
  "num_loads": len(network.loads),
927
1118
  "num_lines": len(network.lines),
928
1119
  "num_links": len(network.links),
929
- "num_snapshots": len(network.snapshots)
1120
+ "num_snapshots": len(network.snapshots),
930
1121
  }
931
-
1122
+
932
1123
  # Add multi-period information if available
933
- if hasattr(network, '_available_years') and network._available_years:
1124
+ if hasattr(network, "_available_years") and network._available_years:
934
1125
  solve_result["years"] = network._available_years
935
1126
  solve_result["multi_period"] = len(network._available_years) > 1
936
-
1127
+
937
1128
  return solve_result
938
-
1129
+
939
1130
  except Exception as e:
940
1131
  logger.error(f"Failed to extract solve results: {e}")
941
1132
  return {
@@ -945,91 +1136,109 @@ class NetworkSolver:
945
1136
  "solve_time": solve_time,
946
1137
  "solver_name": solver_name,
947
1138
  "run_id": run_id,
948
- "objective_value": None
1139
+ "objective_value": None,
949
1140
  }
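On a successful solve, the dictionary assembled here has roughly the shape below; every value is invented for illustration, and only keys visible in this diff are listed.

```python
# Hypothetical example of a successful solve_result payload (values invented).
solve_result = {
    "success": True,
    "status": "optimal",
    "objective_value": 1.23e9,  # from network.objective
    "solve_time": 42.7,         # seconds
    "solver_name": "highs",
    "run_id": "run-0001",
    "num_loads": 12,
    "num_lines": 18,
    "num_links": 4,
    "num_snapshots": 8760,
    # Added when the network records its available years:
    "years": [2030, 2035, 2040],
    "multi_period": True,
}
```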
950
-
951
- def _determine_solve_success(self, result: Any, network: 'pypsa.Network', status: str, objective_value: Optional[float]) -> bool:
1141
+
1142
+ def _determine_solve_success(
1143
+ self,
1144
+ result: Any,
1145
+ network: "pypsa.Network",
1146
+ status: str,
1147
+ objective_value: Optional[float],
1148
+ ) -> bool:
952
1149
  """
953
1150
  Determine if solve was successful based on multiple criteria.
954
-
1151
+
955
1152
  PyPSA sometimes returns status='unknown' even for successful solves,
956
1153
  so we need to check multiple indicators.
957
1154
  """
958
1155
  try:
959
1156
  # Check explicit status first
960
- if status in ['optimal', 'feasible']:
1157
+ if status in ["optimal", "feasible"]:
961
1158
  logger.info(f"Success determined by status: {status}")
962
1159
  return True
963
-
1160
+
964
1161
  # Check termination condition
965
- if hasattr(result, 'termination_condition'):
1162
+ if hasattr(result, "termination_condition"):
966
1163
  term_condition = str(result.termination_condition).lower()
967
- if 'optimal' in term_condition:
968
- logger.info(f"Success determined by termination condition: {result.termination_condition}")
1164
+ if "optimal" in term_condition:
1165
+ logger.info(
1166
+ f"Success determined by termination condition: {result.termination_condition}"
1167
+ )
969
1168
  return True
970
-
1169
+
971
1170
  # Check if we have a valid objective value
972
- if objective_value is not None and not (objective_value == 0 and status == 'unknown'):
973
- logger.info(f"Success determined by valid objective value: {objective_value}")
1171
+ if objective_value is not None and not (
1172
+ objective_value == 0 and status == "unknown"
1173
+ ):
1174
+ logger.info(
1175
+ f"Success determined by valid objective value: {objective_value}"
1176
+ )
974
1177
  return True
975
-
1178
+
976
1179
  # Check solver-specific success indicators
977
- if hasattr(result, 'solver'):
1180
+ if hasattr(result, "solver"):
978
1181
  solver_info = result.solver
979
- if hasattr(solver_info, 'termination_condition'):
1182
+ if hasattr(solver_info, "termination_condition"):
980
1183
  term_condition = str(solver_info.termination_condition).lower()
981
- if 'optimal' in term_condition:
982
- logger.info(f"Success determined by solver termination condition: {solver_info.termination_condition}")
1184
+ if "optimal" in term_condition:
1185
+ logger.info(
1186
+ f"Success determined by solver termination condition: {solver_info.termination_condition}"
1187
+ )
983
1188
  return True
984
-
985
- logger.warning(f"Could not determine success: status={status}, objective={objective_value}, result_attrs={dir(result) if result else 'None'}")
1189
+
1190
+ logger.warning(
1191
+ f"Could not determine success: status={status}, objective={objective_value}, result_attrs={dir(result) if result else 'None'}"
1192
+ )
986
1193
  return False
987
-
1194
+
988
1195
  except Exception as e:
989
1196
  logger.error(f"Error determining solve success: {e}")
990
1197
  return False
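The layered fallback above can be illustrated with a minimal stand-in result object (hypothetical, illustration only): even when `status` is "unknown", an optimal termination condition or a meaningful objective value is enough to count the solve as successful.

```python
# Minimal illustration of the layered checks, using a stand-in result object.
from types import SimpleNamespace

result = SimpleNamespace(status="unknown", termination_condition="optimal")
status = result.status
objective_value = 1.5e8  # invented

# Mirror of the fallback order: explicit status, then termination condition,
# then a non-trivial objective value.
success = (
    status in ("optimal", "feasible")
    or "optimal" in str(getattr(result, "termination_condition", "")).lower()
    or (objective_value is not None and not (objective_value == 0 and status == "unknown"))
)
print(success)  # True: the termination condition rescues the 'unknown' status
```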
991
-
1198
+
992
1199
  def _convert_pypsa_result_to_dict(self, result) -> Dict[str, Any]:
993
1200
  """
994
1201
  Convert PyPSA result object to dictionary.
995
-
1202
+
996
1203
  Args:
997
1204
  result: PyPSA solve result object
998
-
1205
+
999
1206
  Returns:
1000
1207
  Dictionary representation of the result
1001
1208
  """
1002
1209
  try:
1003
1210
  if result is None:
1004
1211
  return {"status": "no_result"}
1005
-
1212
+
1006
1213
  result_dict = {}
1007
-
1214
+
1008
1215
  # Extract common attributes
1009
- for attr in ['status', 'success', 'termination_condition', 'solver']:
1216
+ for attr in ["status", "success", "termination_condition", "solver"]:
1010
1217
  if hasattr(result, attr):
1011
1218
  value = getattr(result, attr)
1012
1219
  # Convert to serializable format
1013
- if hasattr(value, '__dict__'):
1220
+ if hasattr(value, "__dict__"):
1014
1221
  result_dict[attr] = str(value)
1015
1222
  else:
1016
1223
  result_dict[attr] = value
1017
-
1224
+
1018
1225
  # Handle solver-specific information
1019
- if hasattr(result, 'solver_results'):
1020
- solver_results = getattr(result, 'solver_results')
1021
- if hasattr(solver_results, '__dict__'):
1022
- result_dict['solver_results'] = str(solver_results)
1226
+ if hasattr(result, "solver_results"):
1227
+ solver_results = getattr(result, "solver_results")
1228
+ if hasattr(solver_results, "__dict__"):
1229
+ result_dict["solver_results"] = str(solver_results)
1023
1230
  else:
1024
- result_dict['solver_results'] = solver_results
1025
-
1231
+ result_dict["solver_results"] = solver_results
1232
+
1026
1233
  return result_dict
1027
-
1234
+
1028
1235
  except Exception as e:
1029
1236
  logger.warning(f"Failed to convert PyPSA result to dict: {e}")
1030
1237
  return {"status": "conversion_failed", "error": str(e)}
1031
-
1032
- def _calculate_comprehensive_network_statistics(self, network: 'pypsa.Network', solve_time: float, solver_name: str) -> Dict[str, Any]:
1238
+
1239
+ def _calculate_comprehensive_network_statistics(
1240
+ self, network: "pypsa.Network", solve_time: float, solver_name: str
1241
+ ) -> Dict[str, Any]:
1033
1242
  """Calculate comprehensive network statistics including PyPSA statistics and custom metrics"""
1034
1243
  try:
1035
1244
  # Initialize statistics structure
@@ -1038,75 +1247,106 @@ class NetworkSolver:
1038
1247
  "pypsa_statistics": {},
1039
1248
  "custom_statistics": {},
1040
1249
  "runtime_info": {},
1041
- "solver_info": {}
1250
+ "solver_info": {},
1042
1251
  }
1043
-
1252
+
1044
1253
  # Core summary statistics
1045
1254
  total_generation = 0
1046
1255
  total_demand = 0
1047
1256
  unserved_energy = 0
1048
-
1257
+
1049
1258
  # Calculate generation statistics
1050
- if hasattr(network, 'generators_t') and hasattr(network.generators_t, 'p'):
1259
+ if hasattr(network, "generators_t") and hasattr(network.generators_t, "p"):
1051
1260
  # Apply snapshot weightings to convert MW to MWh
1052
1261
  weightings = network.snapshot_weightings
1053
1262
  if isinstance(weightings, pd.DataFrame):
1054
- if 'objective' in weightings.columns:
1055
- weighting_values = weightings['objective'].values
1263
+ if "objective" in weightings.columns:
1264
+ weighting_values = weightings["objective"].values
1056
1265
  else:
1057
1266
  weighting_values = weightings.iloc[:, 0].values
1058
1267
  else:
1059
1268
  weighting_values = weightings.values
1060
-
1061
- total_generation = float((network.generators_t.p.values * weighting_values[:, None]).sum())
1062
-
1269
+
1270
+ total_generation = float(
1271
+ (network.generators_t.p.values * weighting_values[:, None]).sum()
1272
+ )
1273
+
1063
1274
  # Calculate unserved energy from UNMET_LOAD generators
1064
- if hasattr(network, 'generators') and hasattr(network, '_component_type_map'):
1065
- unmet_load_gen_names = [name for name, comp_type in network._component_type_map.items()
1066
- if comp_type == 'UNMET_LOAD']
1067
-
1275
+ if hasattr(network, "generators") and hasattr(
1276
+ network, "_component_type_map"
1277
+ ):
1278
+ unmet_load_gen_names = [
1279
+ name
1280
+ for name, comp_type in network._component_type_map.items()
1281
+ if comp_type == "UNMET_LOAD"
1282
+ ]
1283
+
1068
1284
  for gen_name in unmet_load_gen_names:
1069
1285
  if gen_name in network.generators_t.p.columns:
1070
- gen_output = float((network.generators_t.p[gen_name] * weighting_values).sum())
1286
+ gen_output = float(
1287
+ (
1288
+ network.generators_t.p[gen_name] * weighting_values
1289
+ ).sum()
1290
+ )
1071
1291
  unserved_energy += gen_output
1072
-
1292
+
1073
1293
  # Calculate demand statistics
1074
- if hasattr(network, 'loads_t') and hasattr(network.loads_t, 'p'):
1294
+ if hasattr(network, "loads_t") and hasattr(network.loads_t, "p"):
1075
1295
  weightings = network.snapshot_weightings
1076
1296
  if isinstance(weightings, pd.DataFrame):
1077
- if 'objective' in weightings.columns:
1078
- weighting_values = weightings['objective'].values
1297
+ if "objective" in weightings.columns:
1298
+ weighting_values = weightings["objective"].values
1079
1299
  else:
1080
1300
  weighting_values = weightings.iloc[:, 0].values
1081
1301
  else:
1082
1302
  weighting_values = weightings.values
1083
-
1084
- total_demand = float((network.loads_t.p.values * weighting_values[:, None]).sum())
1085
-
1303
+
1304
+ total_demand = float(
1305
+ (network.loads_t.p.values * weighting_values[:, None]).sum()
1306
+ )
1307
+
1086
1308
  statistics["core_summary"] = {
1087
1309
  "total_generation_mwh": total_generation,
1088
1310
  "total_demand_mwh": total_demand,
1089
- "total_cost": float(network.objective) if hasattr(network, 'objective') else None,
1090
- "load_factor": (total_demand / (total_generation + 1e-6)) if total_generation > 0 else 0,
1091
- "unserved_energy_mwh": unserved_energy
1311
+ "total_cost": (
1312
+ float(network.objective) if hasattr(network, "objective") else None
1313
+ ),
1314
+ "load_factor": (
1315
+ (total_demand / (total_generation + 1e-6))
1316
+ if total_generation > 0
1317
+ else 0
1318
+ ),
1319
+ "unserved_energy_mwh": unserved_energy,
1092
1320
  }
1093
-
1321
+
1094
1322
  # Calculate PyPSA statistics
1095
1323
  try:
1096
1324
  pypsa_stats = network.statistics()
1097
1325
  if pypsa_stats is not None and not pypsa_stats.empty:
1098
- statistics["pypsa_statistics"] = self._convert_pypsa_result_to_dict(pypsa_stats)
1326
+ statistics["pypsa_statistics"] = self._convert_pypsa_result_to_dict(
1327
+ pypsa_stats
1328
+ )
1099
1329
  else:
1100
1330
  statistics["pypsa_statistics"] = {}
1101
1331
  except Exception as e:
1102
1332
  logger.error(f"Failed to calculate PyPSA statistics: {e}")
1103
1333
  statistics["pypsa_statistics"] = {}
1104
-
1334
+
1105
1335
  # Custom statistics - calculate detailed breakdowns
1106
- total_cost = float(network.objective) if hasattr(network, 'objective') else 0.0
1107
- avg_price = (total_cost / (total_generation + 1e-6)) if total_generation > 0 else None
1108
- unmet_load_percentage = (unserved_energy / (total_demand + 1e-6)) * 100 if total_demand > 0 else 0
1109
-
1336
+ total_cost = (
1337
+ float(network.objective) if hasattr(network, "objective") else 0.0
1338
+ )
1339
+ avg_price = (
1340
+ (total_cost / (total_generation + 1e-6))
1341
+ if total_generation > 0
1342
+ else None
1343
+ )
1344
+ unmet_load_percentage = (
1345
+ (unserved_energy / (total_demand + 1e-6)) * 100
1346
+ if total_demand > 0
1347
+ else 0
1348
+ )
1349
+
1110
1350
  # Note: For solver statistics, we keep simplified approach since this is just for logging
1111
1351
  # The storage module will calculate proper totals from carrier statistics
1112
1352
  statistics["custom_statistics"] = {
@@ -1116,86 +1356,121 @@ class NetworkSolver:
1116
1356
  "total_emissions_tons_co2": 0.0, # Will be calculated properly in storage module
1117
1357
  "average_price_per_mwh": avg_price,
1118
1358
  "unmet_load_percentage": unmet_load_percentage,
1119
- "max_unmet_load_hour_mw": 0.0 # TODO: Calculate max hourly unmet load
1359
+ "max_unmet_load_hour_mw": 0.0, # TODO: Calculate max hourly unmet load
1120
1360
  }
1121
-
1361
+
1122
1362
  # Runtime info
1123
1363
  unmet_load_count = 0
1124
- if hasattr(network, '_component_type_map'):
1125
- unmet_load_count = len([name for name, comp_type in network._component_type_map.items()
1126
- if comp_type == 'UNMET_LOAD'])
1127
-
1364
+ if hasattr(network, "_component_type_map"):
1365
+ unmet_load_count = len(
1366
+ [
1367
+ name
1368
+ for name, comp_type in network._component_type_map.items()
1369
+ if comp_type == "UNMET_LOAD"
1370
+ ]
1371
+ )
1372
+
1128
1373
  statistics["runtime_info"] = {
1129
1374
  "solve_time_seconds": solve_time,
1130
1375
  "component_count": (
1131
- len(network.buses) + len(network.generators) + len(network.loads) +
1132
- len(network.lines) + len(network.links)
1133
- ) if hasattr(network, 'buses') else 0,
1134
- "bus_count": len(network.buses) if hasattr(network, 'buses') else 0,
1135
- "generator_count": len(network.generators) if hasattr(network, 'generators') else 0,
1376
+ (
1377
+ len(network.buses)
1378
+ + len(network.generators)
1379
+ + len(network.loads)
1380
+ + len(network.lines)
1381
+ + len(network.links)
1382
+ )
1383
+ if hasattr(network, "buses")
1384
+ else 0
1385
+ ),
1386
+ "bus_count": len(network.buses) if hasattr(network, "buses") else 0,
1387
+ "generator_count": (
1388
+ len(network.generators) if hasattr(network, "generators") else 0
1389
+ ),
1136
1390
  "unmet_load_count": unmet_load_count,
1137
- "load_count": len(network.loads) if hasattr(network, 'loads') else 0,
1138
- "line_count": len(network.lines) if hasattr(network, 'lines') else 0,
1139
- "snapshot_count": len(network.snapshots) if hasattr(network, 'snapshots') else 0
1391
+ "load_count": len(network.loads) if hasattr(network, "loads") else 0,
1392
+ "line_count": len(network.lines) if hasattr(network, "lines") else 0,
1393
+ "snapshot_count": (
1394
+ len(network.snapshots) if hasattr(network, "snapshots") else 0
1395
+ ),
1140
1396
  }
1141
-
1397
+
1142
1398
  # Solver info
1143
1399
  statistics["solver_info"] = {
1144
1400
  "solver_name": solver_name,
1145
- "termination_condition": "optimal" if hasattr(network, 'objective') else "unknown",
1146
- "objective_value": float(network.objective) if hasattr(network, 'objective') else None
1401
+ "termination_condition": (
1402
+ "optimal" if hasattr(network, "objective") else "unknown"
1403
+ ),
1404
+ "objective_value": (
1405
+ float(network.objective) if hasattr(network, "objective") else None
1406
+ ),
1147
1407
  }
1148
-
1408
+
1149
1409
  return statistics
1150
-
1410
+
1151
1411
  except Exception as e:
1152
- logger.error(f"Failed to calculate comprehensive network statistics: {e}", exc_info=True)
1412
+ logger.error(
1413
+ f"Failed to calculate comprehensive network statistics: {e}",
1414
+ exc_info=True,
1415
+ )
1153
1416
  return {
1154
1417
  "error": str(e),
1155
1418
  "core_summary": {},
1156
1419
  "pypsa_statistics": {},
1157
1420
  "custom_statistics": {},
1158
1421
  "runtime_info": {"solve_time_seconds": solve_time},
1159
- "solver_info": {"solver_name": solver_name}
1422
+ "solver_info": {"solver_name": solver_name},
1160
1423
  }
1161
-
1162
- def _calculate_statistics_by_year(self, network: 'pypsa.Network', solve_time: float, solver_name: str) -> Dict[int, Dict[str, Any]]:
1424
+
1425
+ def _calculate_statistics_by_year(
1426
+ self, network: "pypsa.Network", solve_time: float, solver_name: str
1427
+ ) -> Dict[int, Dict[str, Any]]:
1163
1428
  """Calculate statistics for each year in the network"""
1164
1429
  try:
1165
1430
  # Extract years from network snapshots or manually extracted years
1166
- if hasattr(network.snapshots, 'year'):
1431
+ if hasattr(network.snapshots, "year"):
1167
1432
  years = sorted(network.snapshots.year.unique())
1168
- elif hasattr(network, '_available_years'):
1433
+ elif hasattr(network, "_available_years"):
1169
1434
  years = network._available_years
1170
- elif hasattr(network.snapshots, 'levels'):
1435
+ elif hasattr(network.snapshots, "levels"):
1171
1436
  # Multi-period optimization - get years from period level
1172
1437
  period_values = network.snapshots.get_level_values(0)
1173
1438
  years = sorted(period_values.unique())
1174
1439
  else:
1175
1440
  # If no year info, skip year-based calculations
1176
- logger.info("No year information found in network - skipping year-based statistics")
1441
+ logger.info(
1442
+ "No year information found in network - skipping year-based statistics"
1443
+ )
1177
1444
  return {}
1178
-
1445
+
1179
1446
  logger.info(f"Calculating year-based statistics for years: {years}")
1180
1447
  year_statistics = {}
1181
-
1448
+
1182
1449
  for year in years:
1183
1450
  try:
1184
- year_stats = self._calculate_network_statistics_for_year(network, year, solve_time, solver_name)
1451
+ year_stats = self._calculate_network_statistics_for_year(
1452
+ network, year, solve_time, solver_name
1453
+ )
1185
1454
  year_statistics[year] = year_stats
1186
1455
  logger.info(f"Calculated statistics for year {year}")
1187
1456
  except Exception as e:
1188
1457
  logger.error(f"Failed to calculate statistics for year {year}: {e}")
1189
1458
  continue
1190
-
1191
- logger.info(f"Successfully calculated year-based statistics for {len(year_statistics)} years")
1459
+
1460
+ logger.info(
1461
+ f"Successfully calculated year-based statistics for {len(year_statistics)} years"
1462
+ )
1192
1463
  return year_statistics
1193
-
1464
+
1194
1465
  except Exception as e:
1195
- logger.error(f"Failed to calculate year-based statistics: {e}", exc_info=True)
1466
+ logger.error(
1467
+ f"Failed to calculate year-based statistics: {e}", exc_info=True
1468
+ )
1196
1469
  return {}
1197
-
1198
- def _calculate_network_statistics_for_year(self, network: 'pypsa.Network', year: int, solve_time: float, solver_name: str) -> Dict[str, Any]:
1470
+
1471
+ def _calculate_network_statistics_for_year(
1472
+ self, network: "pypsa.Network", year: int, solve_time: float, solver_name: str
1473
+ ) -> Dict[str, Any]:
1199
1474
  """Calculate network statistics for a specific year"""
1200
1475
  try:
1201
1476
  # Initialize statistics structure
@@ -1203,181 +1478,208 @@ class NetworkSolver:
1203
1478
  "core_summary": {},
1204
1479
  "custom_statistics": {},
1205
1480
  "runtime_info": {},
1206
- "solver_info": {}
1481
+ "solver_info": {},
1207
1482
  }
1208
-
1483
+
1209
1484
  # Core summary statistics for this year
1210
1485
  total_generation = 0
1211
1486
  total_demand = 0
1212
1487
  unserved_energy = 0
1213
-
1488
+
1214
1489
  # Calculate generation statistics for this year
1215
- if hasattr(network, 'generators_t') and hasattr(network.generators_t, 'p'):
1490
+ if hasattr(network, "generators_t") and hasattr(network.generators_t, "p"):
1216
1491
  # Filter by year
1217
- year_generation = self._filter_timeseries_by_year(network.generators_t.p, network.snapshots, year)
1492
+ year_generation = self._filter_timeseries_by_year(
1493
+ network.generators_t.p, network.snapshots, year
1494
+ )
1218
1495
  if year_generation is not None and not year_generation.empty:
1219
1496
  # Apply snapshot weightings for this year
1220
1497
  year_weightings = self._get_year_weightings(network, year)
1221
1498
  if year_weightings is not None:
1222
- total_generation = float((year_generation.values * year_weightings[:, None]).sum())
1499
+ total_generation = float(
1500
+ (year_generation.values * year_weightings[:, None]).sum()
1501
+ )
1223
1502
  else:
1224
1503
  total_generation = float(year_generation.sum().sum())
1225
-
1504
+
1226
1505
  # Calculate unserved energy for this year
1227
- if hasattr(network, '_component_type_map'):
1228
- unmet_load_gen_names = [name for name, comp_type in network._component_type_map.items()
1229
- if comp_type == 'UNMET_LOAD']
1230
-
1506
+ if hasattr(network, "_component_type_map"):
1507
+ unmet_load_gen_names = [
1508
+ name
1509
+ for name, comp_type in network._component_type_map.items()
1510
+ if comp_type == "UNMET_LOAD"
1511
+ ]
1512
+
1231
1513
  for gen_name in unmet_load_gen_names:
1232
1514
  if gen_name in year_generation.columns:
1233
1515
  if year_weightings is not None:
1234
- gen_output = float((year_generation[gen_name] * year_weightings).sum())
1516
+ gen_output = float(
1517
+ (
1518
+ year_generation[gen_name] * year_weightings
1519
+ ).sum()
1520
+ )
1235
1521
  else:
1236
1522
  gen_output = float(year_generation[gen_name].sum())
1237
1523
  unserved_energy += gen_output
1238
-
1524
+
1239
1525
  # Calculate demand statistics for this year
1240
- if hasattr(network, 'loads_t') and hasattr(network.loads_t, 'p'):
1241
- year_demand = self._filter_timeseries_by_year(network.loads_t.p, network.snapshots, year)
1526
+ if hasattr(network, "loads_t") and hasattr(network.loads_t, "p"):
1527
+ year_demand = self._filter_timeseries_by_year(
1528
+ network.loads_t.p, network.snapshots, year
1529
+ )
1242
1530
  if year_demand is not None and not year_demand.empty:
1243
1531
  year_weightings = self._get_year_weightings(network, year)
1244
1532
  if year_weightings is not None:
1245
- total_demand = float((year_demand.values * year_weightings[:, None]).sum())
1533
+ total_demand = float(
1534
+ (year_demand.values * year_weightings[:, None]).sum()
1535
+ )
1246
1536
  else:
1247
1537
  total_demand = float(year_demand.sum().sum())
1248
-
1538
+
1249
1539
  statistics["core_summary"] = {
1250
1540
  "total_generation_mwh": total_generation,
1251
1541
  "total_demand_mwh": total_demand,
1252
1542
  "total_cost": None, # Year-specific cost calculation would be complex
1253
- "load_factor": (total_demand / (total_generation + 1e-6)) if total_generation > 0 else 0,
1254
- "unserved_energy_mwh": unserved_energy
1543
+ "load_factor": (
1544
+ (total_demand / (total_generation + 1e-6))
1545
+ if total_generation > 0
1546
+ else 0
1547
+ ),
1548
+ "unserved_energy_mwh": unserved_energy,
1255
1549
  }
1256
-
1550
+
1257
1551
  # Custom statistics
1258
- unmet_load_percentage = (unserved_energy / (total_demand + 1e-6)) * 100 if total_demand > 0 else 0
1259
-
1552
+ unmet_load_percentage = (
1553
+ (unserved_energy / (total_demand + 1e-6)) * 100
1554
+ if total_demand > 0
1555
+ else 0
1556
+ )
1557
+
1260
1558
  # Calculate year-specific carrier statistics
1261
1559
  year_carrier_stats = self._calculate_year_carrier_statistics(network, year)
1262
-
1560
+
1263
1561
  statistics["custom_statistics"] = {
1264
1562
  "unmet_load_percentage": unmet_load_percentage,
1265
1563
  "year": year,
1266
- **year_carrier_stats # Include all carrier-specific statistics for this year
1564
+ **year_carrier_stats, # Include all carrier-specific statistics for this year
1267
1565
  }
1268
-
1566
+
1269
1567
  # Runtime info
1270
1568
  year_snapshot_count = self._count_year_snapshots(network.snapshots, year)
1271
-
1569
+
1272
1570
  statistics["runtime_info"] = {
1273
1571
  "solve_time_seconds": solve_time,
1274
1572
  "year": year,
1275
- "snapshot_count": year_snapshot_count
1573
+ "snapshot_count": year_snapshot_count,
1276
1574
  }
1277
-
1575
+
1278
1576
  # Solver info
1279
- statistics["solver_info"] = {
1280
- "solver_name": solver_name,
1281
- "year": year
1282
- }
1283
-
1577
+ statistics["solver_info"] = {"solver_name": solver_name, "year": year}
1578
+
1284
1579
  return statistics
1285
-
1580
+
1286
1581
  except Exception as e:
1287
- logger.error(f"Failed to calculate network statistics for year {year}: {e}", exc_info=True)
1582
+ logger.error(
1583
+ f"Failed to calculate network statistics for year {year}: {e}",
1584
+ exc_info=True,
1585
+ )
1288
1586
  return {
1289
1587
  "error": str(e),
1290
1588
  "core_summary": {},
1291
1589
  "custom_statistics": {"year": year},
1292
1590
  "runtime_info": {"solve_time_seconds": solve_time, "year": year},
1293
- "solver_info": {"solver_name": solver_name, "year": year}
1591
+ "solver_info": {"solver_name": solver_name, "year": year},
1294
1592
  }
1295
-
1296
- def _filter_timeseries_by_year(self, timeseries_df: 'pd.DataFrame', snapshots: 'pd.Index', year: int) -> 'pd.DataFrame':
1593
+
1594
+ def _filter_timeseries_by_year(
1595
+ self, timeseries_df: "pd.DataFrame", snapshots: "pd.Index", year: int
1596
+ ) -> "pd.DataFrame":
1297
1597
  """Filter timeseries data by year"""
1298
1598
  try:
1299
1599
  # Handle MultiIndex case (multi-period optimization)
1300
- if hasattr(snapshots, 'levels'):
1600
+ if hasattr(snapshots, "levels"):
1301
1601
  period_values = snapshots.get_level_values(0)
1302
1602
  year_mask = period_values == year
1303
1603
  if year_mask.any():
1304
1604
  year_snapshots = snapshots[year_mask]
1305
1605
  return timeseries_df.loc[year_snapshots]
1306
-
1606
+
1307
1607
  # Handle DatetimeIndex case (regular time series)
1308
- elif hasattr(snapshots, 'year'):
1608
+ elif hasattr(snapshots, "year"):
1309
1609
  year_mask = snapshots.year == year
1310
1610
  if year_mask.any():
1311
1611
  return timeseries_df.loc[year_mask]
1312
-
1612
+
1313
1613
  # Fallback - return None if can't filter
1314
1614
  return None
1315
-
1615
+
1316
1616
  except Exception as e:
1317
1617
  logger.error(f"Failed to filter timeseries by year {year}: {e}")
1318
1618
  return None
1319
-
1320
- def _get_year_weightings(self, network: 'pypsa.Network', year: int) -> 'np.ndarray':
1619
+
1620
+ def _get_year_weightings(self, network: "pypsa.Network", year: int) -> "np.ndarray":
1321
1621
  """Get snapshot weightings for a specific year"""
1322
1622
  try:
1323
1623
  # Filter snapshot weightings by year
1324
- if hasattr(network.snapshots, 'levels'):
1624
+ if hasattr(network.snapshots, "levels"):
1325
1625
  period_values = network.snapshots.get_level_values(0)
1326
1626
  year_mask = period_values == year
1327
1627
  if year_mask.any():
1328
1628
  year_snapshots = network.snapshots[year_mask]
1329
1629
  year_weightings = network.snapshot_weightings.loc[year_snapshots]
1330
1630
  if isinstance(year_weightings, pd.DataFrame):
1331
- if 'objective' in year_weightings.columns:
1332
- return year_weightings['objective'].values
1631
+ if "objective" in year_weightings.columns:
1632
+ return year_weightings["objective"].values
1333
1633
  else:
1334
1634
  return year_weightings.iloc[:, 0].values
1335
1635
  else:
1336
1636
  return year_weightings.values
1337
-
1338
- elif hasattr(network.snapshots, 'year'):
1637
+
1638
+ elif hasattr(network.snapshots, "year"):
1339
1639
  year_mask = network.snapshots.year == year
1340
1640
  if year_mask.any():
1341
1641
  year_weightings = network.snapshot_weightings.loc[year_mask]
1342
1642
  if isinstance(year_weightings, pd.DataFrame):
1343
- if 'objective' in year_weightings.columns:
1344
- return year_weightings['objective'].values
1643
+ if "objective" in year_weightings.columns:
1644
+ return year_weightings["objective"].values
1345
1645
  else:
1346
1646
  return year_weightings.iloc[:, 0].values
1347
1647
  else:
1348
1648
  return year_weightings.values
1349
-
1649
+
1350
1650
  return None
1351
-
1651
+
1352
1652
  except Exception as e:
1353
1653
  logger.error(f"Failed to get year weightings for year {year}: {e}")
1354
1654
  return None
1355
-
1356
- def _count_year_snapshots(self, snapshots: 'pd.Index', year: int) -> int:
1655
+
1656
+ def _count_year_snapshots(self, snapshots: "pd.Index", year: int) -> int:
1357
1657
  """Count snapshots for a specific year"""
1358
1658
  try:
1359
1659
  # Handle MultiIndex case
1360
- if hasattr(snapshots, 'levels'):
1660
+ if hasattr(snapshots, "levels"):
1361
1661
  period_values = snapshots.get_level_values(0)
1362
1662
  year_mask = period_values == year
1363
1663
  return year_mask.sum()
1364
-
1664
+
1365
1665
  # Handle DatetimeIndex case
1366
- elif hasattr(snapshots, 'year'):
1666
+ elif hasattr(snapshots, "year"):
1367
1667
  year_mask = snapshots.year == year
1368
1668
  return year_mask.sum()
1369
-
1669
+
1370
1670
  # Fallback
1371
1671
  return 0
1372
-
1672
+
1373
1673
  except Exception as e:
1374
1674
  logger.error(f"Failed to count snapshots for year {year}: {e}")
1375
1675
  return 0
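The year-based helpers above all hinge on the same idea: selecting the rows of a (period, timestep) MultiIndex that belong to a single year. A self-contained sketch with invented data:

```python
# Illustrative sketch: filtering multi-period snapshots by the period level.
import pandas as pd

periods = [2030, 2030, 2035, 2035]
timesteps = pd.to_datetime(
    ["2030-01-01 00:00", "2030-01-01 03:00", "2035-01-01 00:00", "2035-01-01 03:00"]
)
snapshots = pd.MultiIndex.from_arrays([periods, timesteps], names=["period", "timestep"])

generation = pd.DataFrame({"gen_a": [100.0, 80.0, 120.0, 90.0]}, index=snapshots)

year = 2030
year_mask = snapshots.get_level_values("period") == year
print(int(year_mask.sum()))                   # 2 snapshots belong to 2030
print(generation.loc[snapshots[year_mask]])   # rows for 2030 only
```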
1376
-
1377
- def _calculate_year_carrier_statistics(self, network: 'pypsa.Network', year: int) -> Dict[str, Any]:
1676
+
1677
+ def _calculate_year_carrier_statistics(
1678
+ self, network: "pypsa.Network", year: int
1679
+ ) -> Dict[str, Any]:
1378
1680
  """Calculate carrier-specific statistics for a specific year"""
1379
1681
  # Note: This is a simplified implementation that doesn't have database access
1380
- # The proper implementation should be done in the storage module where we have conn and network_id
1682
+ # The proper implementation should be done in the storage module where we have conn
1381
1683
  # For now, return empty dictionaries - the storage module will handle this properly
1382
1684
  return {
1383
1685
  "dispatch_by_carrier": {},
@@ -1385,35 +1687,35 @@ class NetworkSolver:
1385
1687
  "emissions_by_carrier": {},
1386
1688
  "capital_cost_by_carrier": {},
1387
1689
  "operational_cost_by_carrier": {},
1388
- "total_system_cost_by_carrier": {}
1690
+ "total_system_cost_by_carrier": {},
1389
1691
  }
1390
-
1692
+
1391
1693
  def _get_generator_carrier_name(self, generator_name: str) -> Optional[str]:
1392
1694
  """Get carrier name for a generator - simplified implementation"""
1393
1695
  # This is a simplified approach - in practice, this should query the database
1394
1696
  # or use the component type mapping from the network
1395
-
1697
+
1396
1698
  # Try to extract carrier from generator name patterns
1397
1699
  gen_lower = generator_name.lower()
1398
-
1399
- if 'coal' in gen_lower:
1400
- return 'coal'
1401
- elif 'gas' in gen_lower or 'ccgt' in gen_lower or 'ocgt' in gen_lower:
1402
- return 'gas'
1403
- elif 'nuclear' in gen_lower:
1404
- return 'nuclear'
1405
- elif 'solar' in gen_lower or 'pv' in gen_lower:
1406
- return 'solar'
1407
- elif 'wind' in gen_lower:
1408
- return 'wind'
1409
- elif 'hydro' in gen_lower:
1410
- return 'hydro'
1411
- elif 'biomass' in gen_lower:
1412
- return 'biomass'
1413
- elif 'battery' in gen_lower:
1414
- return 'battery'
1415
- elif 'unmet' in gen_lower:
1416
- return 'Unmet Load'
1700
+
1701
+ if "coal" in gen_lower:
1702
+ return "coal"
1703
+ elif "gas" in gen_lower or "ccgt" in gen_lower or "ocgt" in gen_lower:
1704
+ return "gas"
1705
+ elif "nuclear" in gen_lower:
1706
+ return "nuclear"
1707
+ elif "solar" in gen_lower or "pv" in gen_lower:
1708
+ return "solar"
1709
+ elif "wind" in gen_lower:
1710
+ return "wind"
1711
+ elif "hydro" in gen_lower:
1712
+ return "hydro"
1713
+ elif "biomass" in gen_lower:
1714
+ return "biomass"
1715
+ elif "battery" in gen_lower:
1716
+ return "battery"
1717
+ elif "unmet" in gen_lower:
1718
+ return "Unmet Load"
1417
1719
  else:
1418
1720
  # Default to generator name if no pattern matches
1419
1721
  return generator_name
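A hypothetical usage sketch of this name-pattern fallback, with invented generator names (assumes `NetworkSolver` from this module is in scope and PyPSA is installed so it can be constructed):

```python
# Invented generator names and the carriers the pattern matching would infer.
solver = NetworkSolver()

examples = {
    "DE_lignite_coal_plant_01": "coal",
    "UK_OCGT_peaker": "gas",        # matched via the 'ocgt' substring
    "FR_nuclear_EPR_2": "nuclear",
    "ES_solar_pv_farm": "solar",
    "DK_offshore_wind_3": "wind",
    "grid_battery_4h": "battery",
    "unmet_load_bus_7": "Unmet Load",
    "mystery_unit_42": "mystery_unit_42",  # no pattern match: name returned as-is
}

for name, expected in examples.items():
    assert solver._get_generator_carrier_name(name) == expected
```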