pyconvexity 0.4.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pyconvexity might be problematic.
- pyconvexity/__init__.py +226 -0
- pyconvexity/_version.py +1 -0
- pyconvexity/core/__init__.py +60 -0
- pyconvexity/core/database.py +485 -0
- pyconvexity/core/errors.py +106 -0
- pyconvexity/core/types.py +400 -0
- pyconvexity/data/README.md +101 -0
- pyconvexity/data/__init__.py +17 -0
- pyconvexity/data/loaders/__init__.py +3 -0
- pyconvexity/data/loaders/cache.py +213 -0
- pyconvexity/data/schema/01_core_schema.sql +420 -0
- pyconvexity/data/schema/02_data_metadata.sql +120 -0
- pyconvexity/data/schema/03_validation_data.sql +506 -0
- pyconvexity/data/sources/__init__.py +5 -0
- pyconvexity/data/sources/gem.py +442 -0
- pyconvexity/io/__init__.py +26 -0
- pyconvexity/io/excel_exporter.py +1226 -0
- pyconvexity/io/excel_importer.py +1381 -0
- pyconvexity/io/netcdf_exporter.py +197 -0
- pyconvexity/io/netcdf_importer.py +1833 -0
- pyconvexity/models/__init__.py +195 -0
- pyconvexity/models/attributes.py +730 -0
- pyconvexity/models/carriers.py +159 -0
- pyconvexity/models/components.py +611 -0
- pyconvexity/models/network.py +503 -0
- pyconvexity/models/results.py +148 -0
- pyconvexity/models/scenarios.py +234 -0
- pyconvexity/solvers/__init__.py +29 -0
- pyconvexity/solvers/pypsa/__init__.py +24 -0
- pyconvexity/solvers/pypsa/api.py +460 -0
- pyconvexity/solvers/pypsa/batch_loader.py +307 -0
- pyconvexity/solvers/pypsa/builder.py +675 -0
- pyconvexity/solvers/pypsa/constraints.py +405 -0
- pyconvexity/solvers/pypsa/solver.py +1509 -0
- pyconvexity/solvers/pypsa/storage.py +2048 -0
- pyconvexity/timeseries.py +330 -0
- pyconvexity/validation/__init__.py +25 -0
- pyconvexity/validation/rules.py +312 -0
- pyconvexity-0.4.3.dist-info/METADATA +47 -0
- pyconvexity-0.4.3.dist-info/RECORD +42 -0
- pyconvexity-0.4.3.dist-info/WHEEL +5 -0
- pyconvexity-0.4.3.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1509 @@
"""
Solving functionality for PyPSA networks.

Simplified to always use multi-period optimization for consistency.
"""

import logging
import time
import uuid
import pandas as pd
import numpy as np
from typing import Dict, Any, Optional

logger = logging.getLogger(__name__)


class NetworkSolver:
    """
    Simplified PyPSA network solver that always uses multi-period optimization.

    This ensures consistent behavior for both single-year and multi-year models.
    """

    def __init__(self, verbose: bool = False):
        """
        Initialize NetworkSolver.

        Args:
            verbose: Enable detailed logging output
        """
        self.verbose = verbose

        # Import PyPSA with error handling
        try:
            import pypsa

            self.pypsa = pypsa
        except ImportError as e:
            raise ImportError(
                "PyPSA is not installed or could not be imported. "
                "Please ensure it is installed correctly in the environment."
            ) from e

    def _get_user_settings_path(self):
        """Get the path to the user settings file (same location as Tauri uses)."""
        try:
            import platform
            import os
            from pathlib import Path

            system = platform.system()
            if system == "Darwin":  # macOS
                home = Path.home()
                app_data_dir = (
                    home / "Library" / "Application Support" / "com.convexity.desktop"
                )
            elif system == "Windows":
                app_data_dir = (
                    Path(os.environ.get("APPDATA", "")) / "com.convexity.desktop"
                )
            else:  # Linux
                home = Path.home()
                app_data_dir = home / ".local" / "share" / "com.convexity.desktop"

            settings_file = app_data_dir / "user_settings.json"
            return settings_file if settings_file.exists() else None

        except Exception as e:
            logger.warning(f"Failed to determine user settings path: {e}")
            return None

    def _resolve_default_solver(self) -> str:
        """Resolve 'default' solver to the user's preferred solver."""
        try:
            import json

            settings_path = self._get_user_settings_path()
            if not settings_path:
                logger.debug(
                    "User settings file not found, using 'highs' as default solver"
                )
                return "highs"

            with open(settings_path, "r") as f:
                user_settings = json.load(f)

            # Get default solver from user settings
            default_solver = user_settings.get("default_solver", "highs")

            # Validate that it's a known solver
            known_solvers = [
                "highs",
                "gurobi",
                "gurobi (barrier)",
                "gurobi (barrier homogeneous)",
                "gurobi (barrier+crossover balanced)",
                "gurobi (dual simplex)",
                "mosek",
                "mosek (default)",
                "mosek (barrier)",
                "mosek (barrier+crossover)",
                "mosek (dual simplex)",
                "copt",
                "copt (barrier)",
                "copt (barrier homogeneous)",
                "copt (barrier+crossover)",
                "copt (dual simplex)",
                "copt (concurrent)",
                "cplex",
                "glpk",
                "cbc",
                "scip",
            ]

            if default_solver in known_solvers:
                return default_solver
            else:
                logger.warning(
                    f"Unknown default solver '{default_solver}' in user settings, falling back to 'highs'"
                )
                return "highs"

        except Exception as e:
            logger.warning(f"Failed to read default solver from user settings: {e}")
            return "highs"

    def solve_network(
        self,
        network: "pypsa.Network",
        solver_name: str = "highs",
        solver_options: Optional[Dict[str, Any]] = None,
        discount_rate: Optional[float] = None,
        job_id: Optional[str] = None,
        conn=None,
        scenario_id: Optional[int] = None,
        constraint_applicator=None,
        custom_solver_config: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        """
        Solve PyPSA network and return results.

        Args:
            network: PyPSA Network object to solve
            solver_name: Solver to use (default: "highs"). Use "custom" for custom_solver_config.
            solver_options: Optional solver-specific options
            discount_rate: Optional discount rate for multi-period optimization
            job_id: Optional job ID for tracking
            custom_solver_config: Optional custom solver configuration when solver_name="custom".
                Format: {"solver": "actual_solver_name", "solver_options": {...}}
                Example: {"solver": "gurobi", "solver_options": {"Method": 2, "Crossover": 0}}

        Returns:
            Dictionary with solve results and metadata

        Raises:
            ImportError: If PyPSA is not available
            Exception: If solving fails
        """
        start_time = time.time()
        run_id = str(uuid.uuid4())

        try:
            # Get solver configuration
            actual_solver_name, solver_config = self._get_solver_config(
                solver_name, solver_options, custom_solver_config
            )

            # Resolve discount rate - fall back to 0.0 if None.
            # Note: the API layer (api.py) handles fetching it from network_config before calling this.
            effective_discount_rate = (
                discount_rate if discount_rate is not None else 0.0
            )

            years = list(network.investment_periods)

            logger.info(
                f"Solving with {actual_solver_name}: {len(years)} periods {years}, "
                f"discount rate {effective_discount_rate}"
            )

            # Calculate investment period weightings with discount rate
            self._calculate_investment_weightings(network, effective_discount_rate)

            # Set snapshot weightings after multi-period setup
            if conn:
                self._set_snapshot_weightings_after_multiperiod(conn, network)

            # Prepare optimization constraints - ONLY model constraints.
            # Network constraints were already applied before solve in api.py.
            extra_functionality = None
            model_constraints = []

            if conn and constraint_applicator:
                optimization_constraints = (
                    constraint_applicator.get_optimization_constraints(
                        conn, scenario_id
                    )
                )
                if optimization_constraints:
                    # Filter for model constraints only (network constraints already applied)
                    for constraint in optimization_constraints:
                        constraint_code = constraint.get("constraint_code", "")
                        constraint_type = self._detect_constraint_type(constraint_code)

                        if constraint_type == "model_constraint":
                            model_constraints.append(constraint)

                if model_constraints:
                    extra_functionality = self._create_extra_functionality(
                        model_constraints, constraint_applicator
                    )
                    if self.verbose:
                        logger.info(
                            f"Prepared {len(model_constraints)} model constraints for optimization"
                        )

            # NOTE: Model constraints are applied DURING solve via extra_functionality.
            # Network constraints were already applied to the network structure before solve.

            if self.verbose:
                logger.info(
                    f"Snapshots: {len(network.snapshots)}, Solver options: {solver_config}"
                )

            if solver_config:
                result = network.optimize(
                    solver_name=actual_solver_name,
                    multi_investment_periods=True,
                    extra_functionality=extra_functionality,
                    **solver_config,
                )
            else:
                result = network.optimize(
                    solver_name=actual_solver_name,
                    multi_investment_periods=True,
                    extra_functionality=extra_functionality,
                )

            solve_time = time.time() - start_time

            # Extract solve results with comprehensive statistics
            solve_result = self._extract_solve_results(
                network, result, solve_time, actual_solver_name, run_id
            )

            # Calculate comprehensive network statistics (all years combined)
            if solve_result.get("success"):
                network_statistics = self._calculate_comprehensive_network_statistics(
                    network, solve_time, actual_solver_name
                )
                solve_result["network_statistics"] = network_statistics

                # Calculate year-based statistics for capacity expansion analysis
                year_statistics = self._calculate_statistics_by_year(
                    network, solve_time, actual_solver_name
                )
                solve_result["year_statistics"] = year_statistics
                solve_result["year_statistics_available"] = len(year_statistics) > 0

            objective_value = getattr(network, "objective", None)
            logger.info(
                f"Solve completed in {solve_time:.2f}s - status: {solve_result['status']}, "
                f"objective: {objective_value}"
            )

            return solve_result

        except Exception as e:
            solve_time = time.time() - start_time
            logger.error(f"Solve failed after {solve_time:.2f} seconds: {e}")
            logger.exception("Full solve error traceback:")

            return {
                "success": False,
                "status": "failed",
                "error": str(e),
                "solve_time": solve_time,
                "solver_name": (
                    actual_solver_name
                    if "actual_solver_name" in locals()
                    else solver_name
                ),
                "run_id": run_id,
                "objective_value": None,
            }
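
    # --- Illustrative usage sketch (added for this diff view; not part of the
    # package source). "my_network" is a hypothetical, already-built
    # pypsa.Network; this shows the "custom" solver path described in the
    # docstring above:
    #
    #   solver = NetworkSolver(verbose=True)
    #   result = solver.solve_network(
    #       my_network,
    #       solver_name="custom",
    #       custom_solver_config={
    #           "solver": "gurobi",
    #           "solver_options": {"Method": 2, "Crossover": 0},
    #       },
    #   )
    #   if result["success"]:
    #       print(result["objective_value"], result["solve_time"])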

    def _get_solver_config(
        self,
        solver_name: str,
        solver_options: Optional[Dict[str, Any]] = None,
        custom_solver_config: Optional[Dict[str, Any]] = None,
    ) -> tuple[str, Optional[Dict[str, Any]]]:
        """
        Get the actual solver name and options for special solver configurations.

        Args:
            solver_name: The solver name (e.g., 'gurobi (barrier)', 'highs', 'custom')
            solver_options: Optional additional solver options
            custom_solver_config: Optional custom solver configuration for solver_name='custom'
                Format: {"solver": "actual_solver_name", "solver_options": {...}}

        Returns:
            Tuple of (actual_solver_name, solver_options_dict)
        """
        # Handle "custom" solver with custom configuration
        if solver_name == "custom":
            if not custom_solver_config:
                raise ValueError(
                    "custom_solver_config must be provided when solver_name='custom'"
                )

            if "solver" not in custom_solver_config:
                raise ValueError(
                    "custom_solver_config must contain 'solver' key with the actual solver name"
                )

            actual_solver = custom_solver_config["solver"]
            custom_options = custom_solver_config.get("solver_options", {})

            # Merge with any additional solver_options passed separately
            if solver_options:
                merged_options = {
                    "solver_options": {**custom_options, **solver_options}
                }
            else:
                merged_options = (
                    {"solver_options": custom_options} if custom_options else None
                )

            return actual_solver, merged_options

        # Handle "default" solver
        if solver_name == "default":
            # Try to read the user's default solver preference
            actual_solver = self._resolve_default_solver()
            return actual_solver, solver_options

        # Handle special Gurobi configurations
        if solver_name == "gurobi (barrier)":
            gurobi_barrier_options = {
                "solver_options": {
                    "Method": 2,  # Barrier
                    "Crossover": 0,  # Skip crossover
                    "MIPGap": 0.05,  # 5% gap
                    "Threads": 0,  # Use all cores (0 = auto)
                    "Presolve": 2,  # Aggressive presolve
                    "ConcurrentMIP": 1,  # Parallel root strategies
                    "BarConvTol": 1e-4,  # Relaxed barrier convergence
                    "FeasibilityTol": 1e-5,
                    "OptimalityTol": 1e-5,
                    "NumericFocus": 1,  # Improve stability
                    "PreSparsify": 1,
                }
            }
            # Merge with any additional options
            if solver_options:
                gurobi_barrier_options["solver_options"].update(solver_options)
            return "gurobi", gurobi_barrier_options

        elif solver_name == "gurobi (barrier homogeneous)":
            gurobi_barrier_homogeneous_options = {
                "solver_options": {
                    "Method": 2,  # Barrier
                    "Crossover": 0,  # Skip crossover
                    "MIPGap": 0.05,
                    "Threads": 0,  # Use all cores (0 = auto)
                    "Presolve": 2,
                    "ConcurrentMIP": 1,
                    "BarConvTol": 1e-4,
                    "FeasibilityTol": 1e-5,
                    "OptimalityTol": 1e-5,
                    "NumericFocus": 1,
                    "PreSparsify": 1,
                    "BarHomogeneous": 1,  # Enable homogeneous barrier algorithm
                }
            }
            if solver_options:
                gurobi_barrier_homogeneous_options["solver_options"].update(
                    solver_options
                )
            return "gurobi", gurobi_barrier_homogeneous_options

        elif solver_name == "gurobi (barrier+crossover balanced)":
            gurobi_options_balanced = {
                "solver_options": {
                    "Method": 2,
                    "Crossover": 1,  # Dual crossover
                    "MIPGap": 0.01,
                    "Threads": 0,  # Use all cores (0 = auto)
                    "Presolve": 2,
                    "Heuristics": 0.1,
                    "Cuts": 2,
                    "ConcurrentMIP": 1,
                    "BarConvTol": 1e-6,
                    "FeasibilityTol": 1e-6,
                    "OptimalityTol": 1e-6,
                    "NumericFocus": 1,
                    "PreSparsify": 1,
                }
            }
            if solver_options:
                gurobi_options_balanced["solver_options"].update(solver_options)
            return "gurobi", gurobi_options_balanced

        elif solver_name == "gurobi (dual simplex)":
            gurobi_dual_options = {
                "solver_options": {
                    "Method": 1,  # Dual simplex method
                    "Threads": 0,  # Use all available cores
                    "Presolve": 2,  # Aggressive presolve
                }
            }
            if solver_options:
                gurobi_dual_options["solver_options"].update(solver_options)
            return "gurobi", gurobi_dual_options

        # Handle special Mosek configurations
        elif solver_name == "mosek (default)":
            # Near-default configuration - only gap and time limit are set
            mosek_default_options = {
                "solver_options": {
                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # MIP relative gap tolerance (5% to match Gurobi)
                    "MSK_IPAR_MIO_MAX_TIME": 36000,  # Max time 10 hours
                }
            }
            if solver_options:
                mosek_default_options["solver_options"].update(solver_options)
            return "mosek", mosek_default_options

        elif solver_name == "mosek (barrier)":
            mosek_barrier_options = {
                "solver_options": {
                    "MSK_IPAR_INTPNT_BASIS": 0,  # Skip crossover (barrier-only) - 0 = MSK_BI_NEVER
                    "MSK_DPAR_INTPNT_TOL_REL_GAP": 1e-4,  # Match Gurobi barrier tolerance
                    "MSK_DPAR_INTPNT_TOL_PFEAS": 1e-5,  # Match Gurobi primal feasibility
                    "MSK_DPAR_INTPNT_TOL_DFEAS": 1e-5,  # Match Gurobi dual feasibility
                    # Removed MSK_DPAR_INTPNT_TOL_INFEAS - was 1000x tighter than other tolerances!
                    "MSK_IPAR_NUM_THREADS": 0,  # Use all available cores (0 = auto)
                    "MSK_IPAR_PRESOLVE_USE": 2,  # Aggressive presolve (match Gurobi Presolve=2)
                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap
                    "MSK_IPAR_MIO_ROOT_OPTIMIZER": 4,  # Use interior-point for MIP root
                    "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hours
                }
            }
            if solver_options:
                mosek_barrier_options["solver_options"].update(solver_options)
            return "mosek", mosek_barrier_options

        elif solver_name == "mosek (barrier+crossover)":
            mosek_barrier_crossover_options = {
                "solver_options": {
                    "MSK_IPAR_INTPNT_BASIS": 1,  # Always crossover (1 = MSK_BI_ALWAYS)
                    "MSK_DPAR_INTPNT_TOL_REL_GAP": 1e-4,  # Match Gurobi barrier tolerance (was 1e-6)
                    "MSK_DPAR_INTPNT_TOL_PFEAS": 1e-5,  # Match Gurobi (was 1e-6)
                    "MSK_DPAR_INTPNT_TOL_DFEAS": 1e-5,  # Match Gurobi (was 1e-6)
                    "MSK_IPAR_NUM_THREADS": 0,  # Use all available cores (0 = auto)
                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap (was 1e-6)
                    "MSK_IPAR_MIO_ROOT_OPTIMIZER": 4,  # Use interior-point for MIP root
                    "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hours (safety limit)
                }
            }
            if solver_options:
                mosek_barrier_crossover_options["solver_options"].update(solver_options)
            return "mosek", mosek_barrier_crossover_options

        elif solver_name == "mosek (dual simplex)":
            mosek_dual_options = {
                "solver_options": {
                    "MSK_IPAR_NUM_THREADS": 0,  # Use all available cores (0 = automatic)
                    "MSK_IPAR_PRESOLVE_USE": 1,  # Force presolve
                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap (was 1e-6)
                    "MSK_IPAR_MIO_ROOT_OPTIMIZER": 1,  # Use dual simplex for MIP root
                    "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hours (safety limit)
                }
            }
            if solver_options:
                mosek_dual_options["solver_options"].update(solver_options)
            return "mosek", mosek_dual_options

        elif solver_name == "mosek":
            # Add default MILP-friendly settings for plain Mosek
            mosek_defaults = {
                "solver_options": {
                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap (was 1e-4)
                    "MSK_IPAR_MIO_MAX_TIME": 36000,  # Max time 10 hours
                    "MSK_IPAR_NUM_THREADS": 0,  # Use all cores (0 = auto)
                }
            }
            if solver_options:
                mosek_defaults["solver_options"].update(solver_options)
            return solver_name, mosek_defaults

        elif solver_name == "gurobi":
            # Add default MILP-friendly settings for plain Gurobi (for consistency)
            gurobi_defaults = {
                "solver_options": {
                    "MIPGap": 1e-4,  # 0.01% gap
                    "TimeLimit": 3600,  # 1 hour
                    "Threads": 0,  # Use all cores
                    "OutputFlag": 1,  # Enable output
                }
            }
            if solver_options:
                gurobi_defaults["solver_options"].update(solver_options)
            return solver_name, gurobi_defaults

        # Handle special COPT configurations
        elif solver_name == "copt (barrier)":
            copt_barrier_options = {
                "solver_options": {
                    "LpMethod": 2,  # Barrier method
                    "Crossover": 0,  # Skip crossover for speed
                    "RelGap": 0.05,  # 5% MIP gap (match Gurobi)
                    "TimeLimit": 7200,  # 2 hour time limit
                    "Threads": -1,  # Use all available cores (-1 = auto)
                    "Presolve": 3,  # Aggressive presolve
                    "Scaling": 1,  # Enable scaling
                    "FeasTol": 1e-5,  # Match Gurobi feasibility
                    "DualTol": 1e-5,  # Match Gurobi dual tolerance
                    # MIP performance settings
                    "CutLevel": 2,  # Normal cut generation
                    "HeurLevel": 3,  # Aggressive heuristics
                    "StrongBranching": 1,  # Fast strong branching
                }
            }
            if solver_options:
                copt_barrier_options["solver_options"].update(solver_options)
            return "copt", copt_barrier_options

        elif solver_name == "copt (barrier homogeneous)":
            copt_barrier_homogeneous_options = {
                "solver_options": {
                    "LpMethod": 2,  # Barrier method
                    "Crossover": 0,  # Skip crossover
                    "BarHomogeneous": 1,  # Use homogeneous self-dual form
                    "RelGap": 0.05,  # 5% MIP gap
                    "TimeLimit": 3600,  # 1 hour
                    "Threads": -1,  # Use all available cores (-1 = auto)
                    "Presolve": 3,  # Aggressive presolve
                    "Scaling": 1,  # Enable scaling
                    "FeasTol": 1e-5,
                    "DualTol": 1e-5,
                    # MIP performance settings
                    "CutLevel": 2,  # Normal cuts
                    "HeurLevel": 3,  # Aggressive heuristics
                    "StrongBranching": 1,  # Fast strong branching
                }
            }
            if solver_options:
                copt_barrier_homogeneous_options["solver_options"].update(
                    solver_options
                )
            return "copt", copt_barrier_homogeneous_options

        elif solver_name == "copt (barrier+crossover)":
            copt_barrier_crossover_options = {
                "solver_options": {
                    "LpMethod": 2,  # Barrier method
                    "Crossover": 1,  # Enable crossover for better solutions
                    "RelGap": 0.05,  # 5% MIP gap (relaxed for faster solves)
                    "TimeLimit": 36000,  # 10 hours
                    "Threads": -1,  # Use all cores
                    "Presolve": 2,  # Aggressive presolve
                    "Scaling": 1,  # Enable scaling
                    "FeasTol": 1e-4,  # Relaxed feasibility (faster solves)
                    "DualTol": 1e-4,  # Relaxed dual tolerance
                }
            }
            if solver_options:
                copt_barrier_crossover_options["solver_options"].update(solver_options)
            return "copt", copt_barrier_crossover_options

        elif solver_name == "copt (dual simplex)":
            copt_dual_simplex_options = {
                "solver_options": {
                    "LpMethod": 1,  # Dual simplex method
                    "RelGap": 0.05,  # 5% MIP gap
                    "TimeLimit": 3600,  # 1 hour
                    "Threads": -1,  # Use all cores
                    "Presolve": 3,  # Aggressive presolve
                    "Scaling": 1,  # Enable scaling
                    "FeasTol": 1e-6,
                    "DualTol": 1e-6,
                    # MIP performance settings
                    "CutLevel": 2,  # Normal cuts
                    "HeurLevel": 2,  # Normal heuristics
                    "StrongBranching": 1,  # Fast strong branching
                }
            }
            if solver_options:
                copt_dual_simplex_options["solver_options"].update(solver_options)
            return "copt", copt_dual_simplex_options

        elif solver_name == "copt (concurrent)":
            copt_concurrent_options = {
                "solver_options": {
                    "LpMethod": 4,  # Concurrent (simplex + barrier)
                    "RelGap": 0.05,  # 5% MIP gap
                    "TimeLimit": 3600,  # 1 hour
                    "Threads": -1,  # Use all cores
                    "Presolve": 3,  # Aggressive presolve
                    "Scaling": 1,  # Enable scaling
                    "FeasTol": 1e-5,
                    "DualTol": 1e-5,
                    # MIP performance settings
                    "CutLevel": 2,  # Normal cuts
                    "HeurLevel": 3,  # Aggressive heuristics
                    "StrongBranching": 1,  # Fast strong branching
                }
            }
            if solver_options:
                copt_concurrent_options["solver_options"].update(solver_options)
            return "copt", copt_concurrent_options

        # Check if this is a known valid solver name
        elif solver_name in ["highs", "cplex", "glpk", "cbc", "scip", "copt"]:
            return solver_name, solver_options

        else:
            # Unknown solver name - log a warning and fall back to highs
            logger.warning(
                f"Unknown solver name '{solver_name}' - falling back to 'highs'"
            )
            return "highs", solver_options
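
    # --- Illustrative sketch (added for this diff view; not part of the
    # package source). Each named preset above resolves to a base solver plus
    # an options dict that is splatted into network.optimize(**solver_config),
    # e.g. (values taken from the branch above):
    #
    #   name, config = solver._get_solver_config("gurobi (dual simplex)")
    #   # name   == "gurobi"
    #   # config == {"solver_options": {"Method": 1, "Threads": 0, "Presolve": 2}}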

    def _detect_constraint_type(self, constraint_code: str) -> str:
        """
        Detect if constraint is network-modification or model-constraint type.

        Args:
            constraint_code: The constraint code to analyze

        Returns:
            "model_constraint" or "network_modification"
        """
        # Type 2 indicators (model constraints) - need access to the optimization model
        model_indicators = [
            "n.optimize.create_model()",
            "m.variables",
            "m.add_constraints",
            "gen_p =",
            "constraint_expr =",
            "LinearExpression",
            "linopy",
            "Generator-p",
            "lhs <=",
        ]

        # Type 1 indicators (network modifications) - modify the network directly
        network_indicators = [
            "n.generators.loc",
            "n.add(",
            "n.buses.",
            "n.lines.",
            "network.generators.loc",
            "network.add(",
            "network.buses.",
            "network.lines.",
        ]

        # Check for model constraint indicators first (more specific)
        if any(indicator in constraint_code for indicator in model_indicators):
            return "model_constraint"
        elif any(indicator in constraint_code for indicator in network_indicators):
            return "network_modification"
        else:
            # Default to network_modification for safety (existing behavior)
            return "network_modification"
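
    # --- Illustrative sketch (added for this diff view; not part of the
    # package source). Two hypothetical constraint_code snippets and how the
    # substring heuristic above classifies them:
    #
    #   code_a = "m = n.optimize.create_model()\nm.add_constraints(lhs <= cap)"
    #   solver._detect_constraint_type(code_a)  # -> "model_constraint"
    #
    #   code_b = "n.generators.loc['gen_1', 'p_nom_max'] = 0"
    #   solver._detect_constraint_type(code_b)  # -> "network_modification"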

    def _create_extra_functionality(
        self, optimization_constraints: list, constraint_applicator
    ) -> callable:
        """
        Create extra_functionality function for optimization-time constraints.

        This matches the old PyPSA solver's approach to applying constraints during optimization.

        Args:
            optimization_constraints: List of optimization constraint dictionaries
            constraint_applicator: ConstraintApplicator instance

        Returns:
            Function that can be passed to network.optimize(extra_functionality=...)
        """

        def extra_functionality(network, snapshots):
            """Apply optimization constraints during solve - matches old code structure."""
            try:
                # Apply each constraint in priority order
                sorted_constraints = sorted(
                    optimization_constraints, key=lambda x: x.get("priority", 0)
                )

                for constraint in sorted_constraints:
                    try:
                        constraint_applicator.apply_optimization_constraint(
                            network, snapshots, constraint
                        )
                    except Exception as e:
                        logger.error(
                            f"Failed to apply optimization constraint {constraint.get('name', 'unknown')}: {e}"
                        )
                        continue

            except Exception as e:
                logger.error(f"Failed to apply optimization constraints: {e}")
                # Don't re-raise - let optimization continue

        return extra_functionality
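
    # --- Illustrative sketch (added for this diff view; not part of the
    # package source). PyPSA invokes extra_functionality(network, snapshots)
    # once the linopy model has been built, so a hypothetical
    # constraint_applicator.apply_optimization_constraint could look roughly
    # like:
    #
    #   def apply_optimization_constraint(network, snapshots, constraint):
    #       m = network.model                   # linopy model during solve
    #       gen_p = m.variables["Generator-p"]  # dispatch variables
    #       m.add_constraints(gen_p.sum() <= 1e6, name=constraint["name"])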

    def _set_snapshot_weightings_after_multiperiod(
        self, conn, network: "pypsa.Network"
    ):
        """Set snapshot weightings AFTER multi-period setup - matches old code approach (single network per database)."""
        try:
            from pyconvexity.models import get_network_time_periods, get_network_info

            time_periods = get_network_time_periods(conn)
            if time_periods and len(network.snapshots) > 0:
                # Get network info to determine the time interval
                network_info = get_network_info(conn)
                time_interval = network_info.get("time_interval", "1H")
                weight = self._parse_time_interval(time_interval)

                if weight is None:
                    weight = 1.0
                    logger.warning(
                        f"Could not parse time interval '{time_interval}', using default weight of 1.0"
                    )

                # Create weightings array - all snapshots get the same weight for this time resolution
                weightings = [weight] * len(time_periods)

                if len(weightings) == len(network.snapshots):
                    # Set all three columns like the old code - critical for proper objective calculation
                    network.snapshot_weightings.loc[:, "objective"] = weightings
                    network.snapshot_weightings.loc[:, "generators"] = weightings
                    network.snapshot_weightings.loc[:, "stores"] = weightings
                else:
                    logger.warning(
                        f"Mismatch between weightings ({len(weightings)}) and snapshots ({len(network.snapshots)})"
                    )
        except Exception as e:
            logger.warning(
                f"Failed to set snapshot weightings after multi-period setup: {e}"
            )
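
    # --- Illustrative sketch (added for this diff view; not part of the
    # package source). With a 3-hour resolution ("3H" -> weight 3.0), each
    # snapshot's MW value counts for 3 MWh, which is how the statistics code
    # below converts power to energy:
    #
    #   w = network.snapshot_weightings["objective"].values
    #   energy_mwh = (network.generators_t.p.values * w[:, None]).sum()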

    def _parse_time_interval(self, time_interval: str) -> Optional[float]:
        """Parse time interval string to hours - handles multiple formats."""
        if not time_interval:
            return None

        try:
            # Clean up the string
            interval = time_interval.strip()

            # Handle ISO 8601 duration format (PT3H, PT30M, etc.)
            if interval.startswith("PT") and interval.endswith("H"):
                # Extract hours (e.g., 'PT3H' -> 3.0)
                hours_str = interval[2:-1]  # Remove 'PT' and 'H'
                return float(hours_str)
            elif interval.startswith("PT") and interval.endswith("M"):
                # Extract minutes (e.g., 'PT30M' -> 0.5)
                minutes_str = interval[2:-1]  # Remove 'PT' and 'M'
                return float(minutes_str) / 60.0
            elif interval.startswith("PT") and interval.endswith("S"):
                # Extract seconds (e.g., 'PT3600S' -> 1.0)
                seconds_str = interval[2:-1]  # Remove 'PT' and 'S'
                return float(seconds_str) / 3600.0

            # Handle simple frequency strings (3H, 2D, etc.)
            elif interval.endswith("H") or interval.endswith("h"):
                hours_str = interval[:-1]
                return float(hours_str) if hours_str else 1.0
            elif interval.endswith("D") or interval.endswith("d"):
                days_str = interval[:-1]
                return float(days_str) * 24 if days_str else 24.0
            elif interval.endswith("M") or interval.endswith("m"):
                minutes_str = interval[:-1]
                return float(minutes_str) / 60.0 if minutes_str else 1.0 / 60.0
            elif interval.endswith("S") or interval.endswith("s"):
                seconds_str = interval[:-1]
                return float(seconds_str) / 3600.0 if seconds_str else 1.0 / 3600.0

            # Try to parse as a plain number (assume hours)
            else:
                return float(interval)

        except (ValueError, TypeError) as e:
            logger.warning(f"Could not parse time interval '{time_interval}': {e}")
            return None
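
    # --- Illustrative conversions from the parser above (added for this diff
    # view; not part of the package source):
    #
    #   solver._parse_time_interval("PT3H")   # -> 3.0 (hours)
    #   solver._parse_time_interval("PT30M")  # -> 0.5
    #   solver._parse_time_interval("2D")     # -> 48.0
    #   solver._parse_time_interval("0.5")    # -> 0.5 (plain number = hours)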

    def _calculate_investment_weightings(
        self, network: "pypsa.Network", discount_rate: float
    ) -> None:
        """
        Calculate investment period weightings using discount rate - matching old PyPSA solver exactly.

        Args:
            network: PyPSA Network object
            discount_rate: Discount rate for NPV calculations
        """
        try:
            import pandas as pd

            if (
                not hasattr(network, "investment_periods")
                or len(network.investment_periods) == 0
            ):
                return

            years = network.investment_periods
            # Convert pandas Index to list for easier handling
            years_list = years.tolist() if hasattr(years, "tolist") else list(years)

            # For a single year, use a simple weighting of 1.0
            if len(years_list) == 1:
                # Single year case
                network.investment_period_weightings = pd.DataFrame(
                    {
                        "objective": pd.Series({years_list[0]: 1.0}),
                        "years": pd.Series({years_list[0]: 1}),
                    }
                )
            else:
                # Multi-year case - EXACTLY match old code logic.
                # Get unique years from the network snapshots to determine period lengths.
                if hasattr(network.snapshots, "year"):
                    snapshot_years = sorted(network.snapshots.year.unique())
                elif hasattr(network.snapshots, "get_level_values"):
                    # MultiIndex case - get years from the 'period' level
                    snapshot_years = sorted(
                        network.snapshots.get_level_values("period").unique()
                    )
                else:
                    # Fallback: use investment periods as years
                    snapshot_years = years_list

                # Calculate years per period - EXACTLY matching old code
                years_diff = []
                for i, year in enumerate(years_list):
                    if i < len(years_list) - 1:
                        # Years between this period and the next
                        next_year = years_list[i + 1]
                        period_years = next_year - year
                    else:
                        # For the last period, calculate based on snapshot coverage
                        if snapshot_years:
                            # Find the last snapshot year that's >= the current period year
                            last_snapshot_year = max(
                                [y for y in snapshot_years if y >= year]
                            )
                            period_years = last_snapshot_year - year + 1
                        else:
                            # Fallback: assume same length as the previous period, or 1
                            if len(years_diff) > 0:
                                period_years = years_diff[-1]  # Same as previous period
                            else:
                                period_years = 1

                    years_diff.append(period_years)

                # Create weightings DataFrame with a years column
                weightings_df = pd.DataFrame(
                    {"years": pd.Series(years_diff, index=years_list)}
                )

                # Calculate objective weightings with discount rate - EXACTLY matching old code
                r = discount_rate
                T = 0  # Cumulative time tracker

                for period, nyears in weightings_df.years.items():
                    # Calculate discount factors for each year in this period
                    discounts = [(1 / (1 + r) ** t) for t in range(T, T + nyears)]
                    period_weighting = sum(discounts)
                    weightings_df.at[period, "objective"] = period_weighting
                    T += nyears  # Update cumulative time

                network.investment_period_weightings = weightings_df

        except Exception as e:
            logger.error(f"Failed to calculate investment weightings: {e}")
            logger.exception("Full traceback:")
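
    # --- Illustrative worked example (added for this diff view; not part of
    # the package source). For investment periods [2030, 2040], snapshots
    # through 2049, and discount_rate r = 0.05, the loop above yields:
    #
    #   period 2030: 10 years, objective = sum(1/1.05**t for t in range(0, 10))  ~ 8.11
    #   period 2040: 10 years, objective = sum(1/1.05**t for t in range(10, 20)) ~ 4.98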

    def _extract_solve_results(
        self,
        network: "pypsa.Network",
        result: Any,
        solve_time: float,
        solver_name: str,
        run_id: str,
    ) -> Dict[str, Any]:
        """
        Extract solve results from PyPSA network.

        Args:
            network: Solved PyPSA Network object
            result: PyPSA solve result
            solve_time: Time taken to solve
            solver_name: Name of solver used
            run_id: Unique run identifier

        Returns:
            Dictionary with solve results and metadata
        """
        try:
            # Extract basic solve information
            status = getattr(result, "status", "unknown")
            objective_value = getattr(network, "objective", None)

            # Convert PyPSA result to dictionary format
            result_dict = self._convert_pypsa_result_to_dict(result)

            # Determine success based on multiple criteria
            success = self._determine_solve_success(
                result, network, status, objective_value
            )

            solve_result = {
                "success": success,
                "status": status,
                "solve_time": solve_time,
                "solver_name": solver_name,
                "run_id": run_id,
                "objective_value": objective_value,
                "pypsa_result": result_dict,
                "network_name": network.name,
                "num_buses": len(network.buses),
                "num_generators": len(network.generators),
                "num_loads": len(network.loads),
                "num_lines": len(network.lines),
                "num_links": len(network.links),
                "num_snapshots": len(network.snapshots),
            }

            # Add multi-period information if available
            if hasattr(network, "_available_years") and network._available_years:
                solve_result["years"] = network._available_years
                solve_result["multi_period"] = len(network._available_years) > 1

            return solve_result

        except Exception as e:
            logger.error(f"Failed to extract solve results: {e}")
            return {
                "success": False,
                "status": "extraction_failed",
                "error": f"Failed to extract results: {e}",
                "solve_time": solve_time,
                "solver_name": solver_name,
                "run_id": run_id,
                "objective_value": None,
            }

    def _determine_solve_success(
        self,
        result: Any,
        network: "pypsa.Network",
        status: str,
        objective_value: Optional[float],
    ) -> bool:
        """
        Determine if the solve was successful based on multiple criteria.

        PyPSA sometimes returns status='unknown' even for successful solves,
        so we need to check multiple indicators.
        """
        try:
            # Check explicit status first
            if status in ["optimal", "feasible"]:
                return True

            # Check termination condition
            if hasattr(result, "termination_condition"):
                term_condition = str(result.termination_condition).lower()
                if "optimal" in term_condition:
                    return True

            # Check if we have a valid objective value
            if objective_value is not None and not (
                objective_value == 0 and status == "unknown"
            ):
                return True

            # Check solver-specific success indicators
            if hasattr(result, "solver"):
                solver_info = result.solver
                if hasattr(solver_info, "termination_condition"):
                    term_condition = str(solver_info.termination_condition).lower()
                    if "optimal" in term_condition:
                        return True

            logger.warning(
                f"Could not determine solve success: status={status}, objective={objective_value}"
            )
            return False

        except Exception as e:
            logger.error(f"Error determining solve success: {e}")
            return False
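
    # --- Illustrative truth table for the fallback logic above (added for
    # this diff view; not part of the package source):
    #
    #   status="optimal", objective=123.0 -> True  (explicit status)
    #   status="unknown", objective=123.0 -> True  (non-zero objective)
    #   status="unknown", objective=0.0   -> False (ambiguous; warning logged)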

    def _convert_pypsa_result_to_dict(self, result) -> Dict[str, Any]:
        """
        Convert PyPSA result object to dictionary.

        Args:
            result: PyPSA solve result object

        Returns:
            Dictionary representation of the result
        """
        try:
            if result is None:
                return {"status": "no_result"}

            result_dict = {}

            # Extract common attributes
            for attr in ["status", "success", "termination_condition", "solver"]:
                if hasattr(result, attr):
                    value = getattr(result, attr)
                    # Convert to a serializable format
                    if hasattr(value, "__dict__"):
                        result_dict[attr] = str(value)
                    else:
                        result_dict[attr] = value

            # Handle solver-specific information
            if hasattr(result, "solver_results"):
                solver_results = getattr(result, "solver_results")
                if hasattr(solver_results, "__dict__"):
                    result_dict["solver_results"] = str(solver_results)
                else:
                    result_dict["solver_results"] = solver_results

            return result_dict

        except Exception as e:
            logger.warning(f"Failed to convert PyPSA result to dict: {e}")
            return {"status": "conversion_failed", "error": str(e)}

    def _calculate_comprehensive_network_statistics(
        self, network: "pypsa.Network", solve_time: float, solver_name: str
    ) -> Dict[str, Any]:
        """Calculate comprehensive network statistics including PyPSA statistics and custom metrics."""
        try:
            # Initialize statistics structure
            statistics = {
                "core_summary": {},
                "pypsa_statistics": {},
                "custom_statistics": {},
                "runtime_info": {},
                "solver_info": {},
            }

            # Core summary statistics
            total_generation = 0
            total_demand = 0
            unserved_energy = 0

            # Calculate generation statistics
            if hasattr(network, "generators_t") and hasattr(network.generators_t, "p"):
                # Apply snapshot weightings to convert MW to MWh
                weightings = network.snapshot_weightings
                if isinstance(weightings, pd.DataFrame):
                    if "objective" in weightings.columns:
                        weighting_values = weightings["objective"].values
                    else:
                        weighting_values = weightings.iloc[:, 0].values
                else:
                    weighting_values = weightings.values

                total_generation = float(
                    (network.generators_t.p.values * weighting_values[:, None]).sum()
                )

                # Calculate unserved energy from UNMET_LOAD generators
                if hasattr(network, "generators") and hasattr(
                    network, "_component_type_map"
                ):
                    unmet_load_gen_names = [
                        name
                        for name, comp_type in network._component_type_map.items()
                        if comp_type == "UNMET_LOAD"
                    ]

                    for gen_name in unmet_load_gen_names:
                        if gen_name in network.generators_t.p.columns:
                            gen_output = float(
                                (
                                    network.generators_t.p[gen_name] * weighting_values
                                ).sum()
                            )
                            unserved_energy += gen_output

            # Calculate demand statistics
            if hasattr(network, "loads_t") and hasattr(network.loads_t, "p"):
                weightings = network.snapshot_weightings
                if isinstance(weightings, pd.DataFrame):
                    if "objective" in weightings.columns:
                        weighting_values = weightings["objective"].values
                    else:
                        weighting_values = weightings.iloc[:, 0].values
                else:
                    weighting_values = weightings.values

                total_demand = float(
                    (network.loads_t.p.values * weighting_values[:, None]).sum()
                )

            statistics["core_summary"] = {
                "total_generation_mwh": total_generation,
                "total_demand_mwh": total_demand,
                "total_cost": (
                    float(network.objective) if hasattr(network, "objective") else None
                ),
                "load_factor": (
                    (total_demand / (total_generation + 1e-6))
                    if total_generation > 0
                    else 0
                ),
                "unserved_energy_mwh": unserved_energy,
            }

            # Calculate PyPSA statistics
            try:
                pypsa_stats = network.statistics()
                if pypsa_stats is not None and not pypsa_stats.empty:
                    statistics["pypsa_statistics"] = self._convert_pypsa_result_to_dict(
                        pypsa_stats
                    )
                else:
                    statistics["pypsa_statistics"] = {}
            except Exception as e:
                logger.error(f"Failed to calculate PyPSA statistics: {e}")
                statistics["pypsa_statistics"] = {}

            # Custom statistics - calculate detailed breakdowns
            total_cost = (
                float(network.objective) if hasattr(network, "objective") else 0.0
            )
            avg_price = (
                (total_cost / (total_generation + 1e-6))
                if total_generation > 0
                else None
            )
            unmet_load_percentage = (
                (unserved_energy / (total_demand + 1e-6)) * 100
                if total_demand > 0
                else 0
            )

            # Note: for solver statistics we keep a simplified approach since this is just for logging.
            # The storage module will calculate proper totals from carrier statistics.
            statistics["custom_statistics"] = {
                "total_capital_cost": 0.0,  # Will be calculated properly in the storage module
                "total_operational_cost": total_cost,  # PyPSA objective (includes both capital and operational, discounted)
                "total_currency_cost": total_cost,
                "total_emissions_tons_co2": 0.0,  # Will be calculated properly in the storage module
                "average_price_per_mwh": avg_price,
                "unmet_load_percentage": unmet_load_percentage,
                "max_unmet_load_hour_mw": 0.0,  # TODO: Calculate max hourly unmet load
            }

            # Runtime info
            unmet_load_count = 0
            if hasattr(network, "_component_type_map"):
                unmet_load_count = len(
                    [
                        name
                        for name, comp_type in network._component_type_map.items()
                        if comp_type == "UNMET_LOAD"
                    ]
                )

            statistics["runtime_info"] = {
                "solve_time_seconds": solve_time,
                "component_count": (
                    (
                        len(network.buses)
                        + len(network.generators)
                        + len(network.loads)
                        + len(network.lines)
                        + len(network.links)
                    )
                    if hasattr(network, "buses")
                    else 0
                ),
                "bus_count": len(network.buses) if hasattr(network, "buses") else 0,
                "generator_count": (
                    len(network.generators) if hasattr(network, "generators") else 0
                ),
                "unmet_load_count": unmet_load_count,
                "load_count": len(network.loads) if hasattr(network, "loads") else 0,
                "line_count": len(network.lines) if hasattr(network, "lines") else 0,
                "snapshot_count": (
                    len(network.snapshots) if hasattr(network, "snapshots") else 0
                ),
            }

            # Solver info
            statistics["solver_info"] = {
                "solver_name": solver_name,
                "termination_condition": (
                    "optimal" if hasattr(network, "objective") else "unknown"
                ),
                "objective_value": (
                    float(network.objective) if hasattr(network, "objective") else None
                ),
            }

            return statistics

        except Exception as e:
            logger.error(
                f"Failed to calculate comprehensive network statistics: {e}",
                exc_info=True,
            )
            return {
                "error": str(e),
                "core_summary": {},
                "pypsa_statistics": {},
                "custom_statistics": {},
                "runtime_info": {"solve_time_seconds": solve_time},
                "solver_info": {"solver_name": solver_name},
            }
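
    # --- Illustrative sketch (added for this diff view; not part of the
    # package source). Reading the structure returned above (keys as defined
    # in this method; "n" is a hypothetical solved network):
    #
    #   stats = solver._calculate_comprehensive_network_statistics(n, 12.3, "highs")
    #   stats["core_summary"]["total_generation_mwh"]
    #   stats["custom_statistics"]["unmet_load_percentage"]
    #   stats["runtime_info"]["snapshot_count"]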

    def _calculate_statistics_by_year(
        self, network: "pypsa.Network", solve_time: float, solver_name: str
    ) -> Dict[int, Dict[str, Any]]:
        """Calculate statistics for each year in the network."""
        try:
            # Extract years from network snapshots or manually extracted years
            if hasattr(network.snapshots, "year"):
                years = sorted(network.snapshots.year.unique())
            elif hasattr(network, "_available_years"):
                years = network._available_years
            elif hasattr(network.snapshots, "levels"):
                # Multi-period optimization - get years from the period level
                period_values = network.snapshots.get_level_values(0)
                years = sorted(period_values.unique())
            else:
                # If there is no year info, skip year-based calculations
                return {}

            year_statistics = {}

            for year in years:
                try:
                    year_stats = self._calculate_network_statistics_for_year(
                        network, year, solve_time, solver_name
                    )
                    year_statistics[year] = year_stats
                except Exception as e:
                    logger.error(f"Failed to calculate statistics for year {year}: {e}")
                    continue

            return year_statistics

        except Exception as e:
            logger.error(
                f"Failed to calculate year-based statistics: {e}", exc_info=True
            )
            return {}
|
    def _calculate_network_statistics_for_year(
        self, network: "pypsa.Network", year: int, solve_time: float, solver_name: str
    ) -> Dict[str, Any]:
        """Calculate network statistics for a specific year"""
        try:
            # Initialize statistics structure
            statistics = {
                "core_summary": {},
                "custom_statistics": {},
                "runtime_info": {},
                "solver_info": {},
            }

            # Core summary statistics for this year
            total_generation = 0
            total_demand = 0
            unserved_energy = 0

            # Calculate generation statistics for this year
            if hasattr(network, "generators_t") and hasattr(network.generators_t, "p"):
                # Filter by year
                year_generation = self._filter_timeseries_by_year(
                    network.generators_t.p, network.snapshots, year
                )
                if year_generation is not None and not year_generation.empty:
                    # Apply snapshot weightings for this year
                    year_weightings = self._get_year_weightings(network, year)
                    if year_weightings is not None:
                        total_generation = float(
                            (year_generation.values * year_weightings[:, None]).sum()
                        )
                    else:
                        total_generation = float(year_generation.sum().sum())

                    # Calculate unserved energy for this year
                    if hasattr(network, "_component_type_map"):
                        unmet_load_gen_names = [
                            name
                            for name, comp_type in network._component_type_map.items()
                            if comp_type == "UNMET_LOAD"
                        ]

                        for gen_name in unmet_load_gen_names:
                            if gen_name in year_generation.columns:
                                if year_weightings is not None:
                                    gen_output = float(
                                        (
                                            year_generation[gen_name] * year_weightings
                                        ).sum()
                                    )
                                else:
                                    gen_output = float(year_generation[gen_name].sum())
                                unserved_energy += gen_output

            # Calculate demand statistics for this year
            if hasattr(network, "loads_t") and hasattr(network.loads_t, "p"):
                year_demand = self._filter_timeseries_by_year(
                    network.loads_t.p, network.snapshots, year
                )
                if year_demand is not None and not year_demand.empty:
                    year_weightings = self._get_year_weightings(network, year)
                    if year_weightings is not None:
                        total_demand = float(
                            (year_demand.values * year_weightings[:, None]).sum()
                        )
                    else:
                        total_demand = float(year_demand.sum().sum())

            statistics["core_summary"] = {
                "total_generation_mwh": total_generation,
                "total_demand_mwh": total_demand,
                "total_cost": None,  # Year-specific cost calculation would be complex
                "load_factor": (
                    (total_demand / (total_generation + 1e-6))
                    if total_generation > 0
                    else 0
                ),
                "unserved_energy_mwh": unserved_energy,
            }

            # Custom statistics
            unmet_load_percentage = (
                (unserved_energy / (total_demand + 1e-6)) * 100
                if total_demand > 0
                else 0
            )

            # Calculate year-specific carrier statistics
            year_carrier_stats = self._calculate_year_carrier_statistics(network, year)

            statistics["custom_statistics"] = {
                "unmet_load_percentage": unmet_load_percentage,
                "year": year,
                **year_carrier_stats,  # Include all carrier-specific statistics for this year
            }

            # Runtime info
            year_snapshot_count = self._count_year_snapshots(network.snapshots, year)

            statistics["runtime_info"] = {
                "solve_time_seconds": solve_time,
                "year": year,
                "snapshot_count": year_snapshot_count,
            }

            # Solver info
            statistics["solver_info"] = {"solver_name": solver_name, "year": year}

            return statistics

        except Exception as e:
            logger.error(
                f"Failed to calculate network statistics for year {year}: {e}",
                exc_info=True,
            )
            return {
                "error": str(e),
                "core_summary": {},
                "custom_statistics": {"year": year},
                "runtime_info": {"solve_time_seconds": solve_time, "year": year},
                "solver_info": {"solver_name": solver_name, "year": year},
            }

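    # Usage sketch (illustrative only; `solved_network` is a hypothetical
    # optimized pypsa.Network and "highs" a hypothetical solver choice): the
    # per-year dict mirrors the whole-horizon statistics, with generation and
    # demand totals weighted by snapshot hours.
    #
    #   >>> solver = NetworkSolver()
    #   >>> stats = solver._calculate_network_statistics_for_year(
    #   ...     solved_network, year=2030, solve_time=12.5, solver_name="highs"
    #   ... )
    #   >>> sorted(stats)
    #   ['core_summary', 'custom_statistics', 'runtime_info', 'solver_info']
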
    def _filter_timeseries_by_year(
        self, timeseries_df: "pd.DataFrame", snapshots: "pd.Index", year: int
    ) -> Optional["pd.DataFrame"]:
        """Filter timeseries data by year"""
        try:
            # Handle MultiIndex case (multi-period optimization): level 0 holds the period
            if hasattr(snapshots, "levels"):
                period_values = snapshots.get_level_values(0)
                year_mask = period_values == year
                if year_mask.any():
                    year_snapshots = snapshots[year_mask]
                    return timeseries_df.loc[year_snapshots]

            # Handle DatetimeIndex case (regular time series)
            elif hasattr(snapshots, "year"):
                year_mask = snapshots.year == year
                if year_mask.any():
                    return timeseries_df.loc[year_mask]

            # Fallback: return None if the index carries no year information
            return None

        except Exception as e:
            logger.error(f"Failed to filter timeseries by year {year}: {e}")
            return None

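    # Minimal sketch of the MultiIndex branch above, using hypothetical data:
    # with a (period, timestep) snapshot index, selecting one year reduces to
    # a mask on index level 0.
    #
    #   >>> import pandas as pd
    #   >>> snaps = pd.MultiIndex.from_product(
    #   ...     [[2030, 2040], pd.date_range("2030-01-01", periods=2, freq="h")],
    #   ...     names=["period", "timestep"],
    #   ... )
    #   >>> df = pd.DataFrame({"gen1": [0, 1, 2, 3]}, index=snaps)
    #   >>> mask = snaps.get_level_values(0) == 2030
    #   >>> df.loc[snaps[mask], "gen1"].tolist()
    #   [0, 1]
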
    def _get_year_weightings(
        self, network: "pypsa.Network", year: int
    ) -> Optional["np.ndarray"]:
        """Get snapshot weightings for a specific year"""
        try:
            # MultiIndex snapshots: filter the weightings on the period level
            if hasattr(network.snapshots, "levels"):
                period_values = network.snapshots.get_level_values(0)
                year_mask = period_values == year
                if year_mask.any():
                    year_snapshots = network.snapshots[year_mask]
                    year_weightings = network.snapshot_weightings.loc[year_snapshots]
                    if isinstance(year_weightings, pd.DataFrame):
                        if "objective" in year_weightings.columns:
                            return year_weightings["objective"].values
                        else:
                            return year_weightings.iloc[:, 0].values
                    else:
                        return year_weightings.values

            # DatetimeIndex snapshots: filter on the calendar year
            elif hasattr(network.snapshots, "year"):
                year_mask = network.snapshots.year == year
                if year_mask.any():
                    year_weightings = network.snapshot_weightings.loc[year_mask]
                    if isinstance(year_weightings, pd.DataFrame):
                        if "objective" in year_weightings.columns:
                            return year_weightings["objective"].values
                        else:
                            return year_weightings.iloc[:, 0].values
                    else:
                        return year_weightings.values

            return None

        except Exception as e:
            logger.error(f"Failed to get year weightings for year {year}: {e}")
            return None

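    # Note (grounded in the branches above): PyPSA stores snapshot weightings
    # as a DataFrame, and the "objective" column is the weight applied to cost
    # and energy terms, which is why it is preferred before falling back to
    # column 0. A minimal sketch of how a weighting vector turns per-snapshot
    # MW into MWh, as done in _calculate_network_statistics_for_year:
    #
    #   >>> import numpy as np, pandas as pd
    #   >>> gen = pd.DataFrame({"g1": [10.0, 10.0]})  # MW per snapshot
    #   >>> w = np.array([1.0, 3.0])                  # hours per snapshot
    #   >>> float((gen.values * w[:, None]).sum())    # weighted MWh total
    #   40.0
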
    def _count_year_snapshots(self, snapshots: "pd.Index", year: int) -> int:
        """Count snapshots for a specific year"""
        try:
            # Handle MultiIndex case
            if hasattr(snapshots, "levels"):
                period_values = snapshots.get_level_values(0)
                year_mask = period_values == year
                return int(year_mask.sum())

            # Handle DatetimeIndex case
            elif hasattr(snapshots, "year"):
                year_mask = snapshots.year == year
                return int(year_mask.sum())

            # Fallback: unknown index type
            return 0

        except Exception as e:
            logger.error(f"Failed to count snapshots for year {year}: {e}")
            return 0

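    # Consistency sketch (hypothetical solved network `n`): both helpers above
    # derive the same year mask, so when a weightings vector exists its length
    # matches the year's snapshot count.
    #
    #   >>> w = solver._get_year_weightings(n, 2030)
    #   >>> w is None or len(w) == solver._count_year_snapshots(n.snapshots, 2030)
    #   True
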
    def _calculate_year_carrier_statistics(
        self, network: "pypsa.Network", year: int
    ) -> Dict[str, Any]:
        """Calculate carrier-specific statistics for a specific year"""
        # Note: this is a simplified implementation without database access.
        # The proper implementation belongs in the storage module, which holds
        # the database connection; until then, return empty per-carrier maps
        # and let the storage module fill them in.
        return {
            "dispatch_by_carrier": {},
            "capacity_by_carrier": {},
            "emissions_by_carrier": {},
            "capital_cost_by_carrier": {},
            "operational_cost_by_carrier": {},
            "total_system_cost_by_carrier": {},
        }

    def _get_generator_carrier_name(self, generator_name: str) -> Optional[str]:
        """Infer a carrier name for a generator - simplified implementation"""
        # Simplified approach: in practice, this should query the database or
        # use the component type mapping from the network.

        # Try to extract the carrier from common generator name patterns
        gen_lower = generator_name.lower()

        if "coal" in gen_lower:
            return "coal"
        elif "gas" in gen_lower or "ccgt" in gen_lower or "ocgt" in gen_lower:
            return "gas"
        elif "nuclear" in gen_lower:
            return "nuclear"
        elif "solar" in gen_lower or "pv" in gen_lower:
            return "solar"
        elif "wind" in gen_lower:
            return "wind"
        elif "hydro" in gen_lower:
            return "hydro"
        elif "biomass" in gen_lower:
            return "biomass"
        elif "battery" in gen_lower:
            return "battery"
        elif "unmet" in gen_lower:
            return "Unmet Load"
        else:
            # Fall back to the generator name itself if no pattern matches
            return generator_name
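
    # Behaviour sketch for the pattern fallback above; the expected values
    # follow directly from the branches (generator names are hypothetical):
    #
    #   >>> solver = NetworkSolver()
    #   >>> solver._get_generator_carrier_name("Gas_CCGT_North")
    #   'gas'
    #   >>> solver._get_generator_carrier_name("Unmet_Load_Bus1")
    #   'Unmet Load'
    #   >>> solver._get_generator_carrier_name("MysteryUnit")  # no match
    #   'MysteryUnit'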