pyconvexity 0.4.0__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyconvexity/__init__.py +87 -46
- pyconvexity/_version.py +1 -1
- pyconvexity/core/__init__.py +3 -5
- pyconvexity/core/database.py +111 -103
- pyconvexity/core/errors.py +16 -10
- pyconvexity/core/types.py +61 -54
- pyconvexity/data/__init__.py +0 -1
- pyconvexity/data/loaders/cache.py +65 -64
- pyconvexity/data/schema/01_core_schema.sql +134 -234
- pyconvexity/data/schema/02_data_metadata.sql +38 -168
- pyconvexity/data/schema/03_validation_data.sql +327 -264
- pyconvexity/data/sources/gem.py +169 -139
- pyconvexity/io/__init__.py +4 -10
- pyconvexity/io/excel_exporter.py +694 -480
- pyconvexity/io/excel_importer.py +817 -545
- pyconvexity/io/netcdf_exporter.py +66 -61
- pyconvexity/io/netcdf_importer.py +850 -619
- pyconvexity/models/__init__.py +109 -59
- pyconvexity/models/attributes.py +197 -178
- pyconvexity/models/carriers.py +70 -67
- pyconvexity/models/components.py +260 -236
- pyconvexity/models/network.py +202 -284
- pyconvexity/models/results.py +65 -55
- pyconvexity/models/scenarios.py +58 -88
- pyconvexity/solvers/__init__.py +5 -5
- pyconvexity/solvers/pypsa/__init__.py +3 -3
- pyconvexity/solvers/pypsa/api.py +150 -134
- pyconvexity/solvers/pypsa/batch_loader.py +165 -162
- pyconvexity/solvers/pypsa/builder.py +390 -291
- pyconvexity/solvers/pypsa/constraints.py +184 -162
- pyconvexity/solvers/pypsa/solver.py +968 -663
- pyconvexity/solvers/pypsa/storage.py +1377 -671
- pyconvexity/timeseries.py +63 -60
- pyconvexity/validation/__init__.py +14 -6
- pyconvexity/validation/rules.py +95 -84
- pyconvexity-0.4.1.dist-info/METADATA +46 -0
- pyconvexity-0.4.1.dist-info/RECORD +42 -0
- pyconvexity/data/schema/04_scenario_schema.sql +0 -122
- pyconvexity/data/schema/migrate_add_geometries.sql +0 -73
- pyconvexity-0.4.0.dist-info/METADATA +0 -138
- pyconvexity-0.4.0.dist-info/RECORD +0 -44
- {pyconvexity-0.4.0.dist-info → pyconvexity-0.4.1.dist-info}/WHEEL +0 -0
- {pyconvexity-0.4.0.dist-info → pyconvexity-0.4.1.dist-info}/top_level.txt +0 -0
pyconvexity/solvers/pypsa/solver.py

@@ -17,96 +17,121 @@ logger = logging.getLogger(__name__)
 class NetworkSolver:
     """
     Simplified PyPSA network solver that always uses multi-period optimization.
-
+
     This ensures consistent behavior for both single-year and multi-year models.
     """
-
+
     def __init__(self):
         # Import PyPSA with error handling
         try:
             import pypsa
+
             self.pypsa = pypsa
         except ImportError as e:
             raise ImportError(
                 "PyPSA is not installed or could not be imported. "
                 "Please ensure it is installed correctly in the environment."
             ) from e
-
+
     def _get_user_settings_path(self):
         """Get the path to the user settings file (same location as Tauri uses)"""
         try:
             import platform
             import os
             from pathlib import Path
-
+
             system = platform.system()
             if system == "Darwin":  # macOS
                 home = Path.home()
-                app_data_dir =
+                app_data_dir = (
+                    home / "Library" / "Application Support" / "com.convexity.desktop"
+                )
             elif system == "Windows":
-                app_data_dir =
+                app_data_dir = (
+                    Path(os.environ.get("APPDATA", "")) / "com.convexity.desktop"
+                )
             else:  # Linux
                 home = Path.home()
                 app_data_dir = home / ".local" / "share" / "com.convexity.desktop"
-
+
             settings_file = app_data_dir / "user_settings.json"
             return settings_file if settings_file.exists() else None
-
+
         except Exception as e:
             logger.warning(f"Failed to determine user settings path: {e}")
             return None
-
+
     def _resolve_default_solver(self) -> str:
         """Resolve 'default' solver to user's preferred solver"""
         try:
             import json
-
+
             settings_path = self._get_user_settings_path()
             if not settings_path:
-                logger.debug(
-
-
-
+                logger.debug(
+                    "User settings file not found, using 'highs' as default solver"
+                )
+                return "highs"
+
+            with open(settings_path, "r") as f:
                 user_settings = json.load(f)
-
+
             # Get default solver from user settings
-            default_solver = user_settings.get(
+            default_solver = user_settings.get("default_solver", "highs")
             logger.info(f"📖 Read default solver from user settings: {default_solver}")
-
+
             # Validate that it's a known solver
-            known_solvers = [
-
-
-
-
-
-
+            known_solvers = [
+                "highs",
+                "gurobi",
+                "gurobi (barrier)",
+                "gurobi (barrier homogeneous)",
+                "gurobi (barrier+crossover balanced)",
+                "gurobi (dual simplex)",
+                "mosek",
+                "mosek (default)",
+                "mosek (barrier)",
+                "mosek (barrier+crossover)",
+                "mosek (dual simplex)",
+                "copt",
+                "copt (barrier)",
+                "copt (barrier homogeneous)",
+                "copt (barrier+crossover)",
+                "copt (dual simplex)",
+                "copt (concurrent)",
+                "cplex",
+                "glpk",
+                "cbc",
+                "scip",
+            ]
+
             if default_solver in known_solvers:
                 return default_solver
             else:
-                logger.warning(
-
-
+                logger.warning(
+                    f"Unknown default solver '{default_solver}' in user settings, falling back to 'highs'"
+                )
+                return "highs"
+
         except Exception as e:
             logger.warning(f"Failed to read default solver from user settings: {e}")
-            return
-
+            return "highs"
+
     def solve_network(
         self,
-        network:
+        network: "pypsa.Network",
         solver_name: str = "highs",
         solver_options: Optional[Dict[str, Any]] = None,
         discount_rate: Optional[float] = None,
         job_id: Optional[str] = None,
         conn=None,
-        network_id: Optional[int] = None,
         scenario_id: Optional[int] = None,
         constraint_applicator=None,
-        custom_solver_config: Optional[Dict[str, Any]] = None
+        custom_solver_config: Optional[Dict[str, Any]] = None,
     ) -> Dict[str, Any]:
         """
         Solve PyPSA network and return results.
-
+
         Args:
             network: PyPSA Network object to solve
             solver_name: Solver to use (default: "highs"). Use "custom" for custom_solver_config.
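Editor's note on the hunk above: besides the mechanical reformatting, it drops the network_id parameter from solve_network() and expands _resolve_default_solver(), which now validates the preference read from user_settings.json against the full known_solvers list before using it, with "highs" as the fallback. A minimal sketch of that lookup in isolation (the "default_solver" key, the user_settings.json filename, and the "highs" fallback come from the diff; the rest is illustrative, not package code):

import json
from pathlib import Path

def resolve_default_solver(settings_file: Path, known_solvers: list) -> str:
    # No settings file yet -> fall back to the packaged default.
    if not settings_file.exists():
        return "highs"
    with open(settings_file, "r") as f:
        user_settings = json.load(f)
    # e.g. user_settings == {"default_solver": "gurobi (barrier)"}
    default_solver = user_settings.get("default_solver", "highs")
    # Unknown names are rejected rather than passed through to PyPSA.
    return default_solver if default_solver in known_solvers else "highs"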
@@ -116,71 +141,91 @@
             custom_solver_config: Optional custom solver configuration when solver_name="custom"
                 Format: {"solver": "actual_solver_name", "solver_options": {...}}
                 Example: {"solver": "gurobi", "solver_options": {"Method": 2, "Crossover": 0}}
-
+
         Returns:
             Dictionary with solve results and metadata
-
+
         Raises:
             ImportError: If PyPSA is not available
             Exception: If solving fails
         """
         start_time = time.time()
         run_id = str(uuid.uuid4())
-
+
         logger.info(f"Starting network solve with {solver_name}")
-
+
         try:
             # Get solver configuration
-            actual_solver_name, solver_config = self._get_solver_config(
-
+            actual_solver_name, solver_config = self._get_solver_config(
+                solver_name, solver_options, custom_solver_config
+            )
+
             # Resolve discount rate - fallback to 0.0 if None
             # Note: API layer (api.py) handles fetching from network_config before calling this
-            effective_discount_rate =
+            effective_discount_rate = (
+                discount_rate if discount_rate is not None else 0.0
+            )
             logger.info(f"Discount rate for solve: {effective_discount_rate}")
-
+
             years = list(network.investment_periods)
-
+
             logger.info(f"Multi-period optimization with {len(years)} periods: {years}")
-
+
             # Calculate investment period weightings with discount rate
             self._calculate_investment_weightings(network, effective_discount_rate)
-
+
             # Set snapshot weightings after multi-period setup
-            if conn
-            self._set_snapshot_weightings_after_multiperiod(conn,
-
+            if conn:
+                self._set_snapshot_weightings_after_multiperiod(conn, network)
+
             # Prepare optimization constraints - ONLY model constraints
             # Network constraints were already applied before solve in api.py
             extra_functionality = None
             model_constraints = []
-
-            if conn and
-            optimization_constraints =
+
+            if conn and constraint_applicator:
+                optimization_constraints = (
+                    constraint_applicator.get_optimization_constraints(
+                        conn, scenario_id
+                    )
+                )
                 if optimization_constraints:
-                    logger.info(
-
+                    logger.info(
+                        f"Found {len(optimization_constraints)} optimization constraints"
+                    )
+
                     # Filter for model constraints only (network constraints already applied)
                     for constraint in optimization_constraints:
-                        constraint_code = constraint.get(
+                        constraint_code = constraint.get("constraint_code", "")
                         constraint_type = self._detect_constraint_type(constraint_code)
-                        constraint_name = constraint.get(
-
+                        constraint_name = constraint.get("name", "unknown")
+
                         if constraint_type == "model_constraint":
                             model_constraints.append(constraint)
-                            logger.info(
+                            logger.info(
+                                f"Will apply model constraint during solve: {constraint_name}"
+                            )
                         else:
-                            logger.info(
-
-
-
+                            logger.info(
+                                f"Skipping network constraint (already applied): {constraint_name}"
+                            )
+
+                    logger.info(
+                        f"Will apply {len(model_constraints)} model constraints during optimization"
+                    )
+
             # Create extra_functionality for model constraints only
             if model_constraints:
-                extra_functionality = self._create_extra_functionality(
-
-
+                extra_functionality = self._create_extra_functionality(
+                    model_constraints, constraint_applicator
+                )
+                logger.info(
+                    f"Prepared {len(model_constraints)} model constraints for optimization-time application"
+                )
+
             # NOTE: Model constraints are applied DURING solve via extra_functionality
             # Network constraints were already applied to the network structure before solve
-
+
             # Solver diagnostics
             logger.info(f"=== PYPSA SOLVER DIAGNOSTICS ===")
             logger.info(f"Solver: {actual_solver_name}")
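For orientation, the custom_solver_config path documented in this hunk's docstring would be exercised roughly as follows. The solver name and option values are the ones quoted in the docstring example above; the surrounding call is a sketch, not code from the package:

solver = NetworkSolver()
result = solver.solve_network(
    network,  # a pypsa.Network assembled elsewhere
    solver_name="custom",
    custom_solver_config={
        "solver": "gurobi",
        "solver_options": {"Method": 2, "Crossover": 0},
    },
)
print(result["success"], result["status"])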
@@ -189,469 +234,560 @@
             if solver_config:
                 logger.info(f"Solver options: {solver_config}")
             logger.info(f"=== END PYPSA SOLVER DIAGNOSTICS ===")
-
+
             # Always solve with multi-period optimization
-            logger.info(
-
+            logger.info(
+                f"Solving network with multi-period optimization using {actual_solver_name}"
+            )
+
             # DEBUG: Check network structure before solving
             logger.info(f"DEBUG: Network snapshots type: {type(network.snapshots)}")
-            logger.info(
+            logger.info(
+                f"DEBUG: Network snapshots names: {getattr(network.snapshots, 'names', 'No names')}"
+            )
             logger.info(f"DEBUG: Network snapshots shape: {len(network.snapshots)}")
             logger.info(f"DEBUG: First 3 snapshots: {network.snapshots[:3].tolist()}")
-
+
             # Check some timeseries data structure
-            if hasattr(network,
+            if hasattr(network, "generators_t") and hasattr(
+                network.generators_t, "p_max_pu"
+            ):
                 if not network.generators_t.p_max_pu.empty:
-                    logger.info(
-
-
-                    logger.info(
-
-
-
+                    logger.info(
+                        f"DEBUG: generators_t.p_max_pu type: {type(network.generators_t.p_max_pu)}"
+                    )
+                    logger.info(
+                        f"DEBUG: generators_t.p_max_pu index type: {type(network.generators_t.p_max_pu.index)}"
+                    )
+                    logger.info(
+                        f"DEBUG: generators_t.p_max_pu index names: {getattr(network.generators_t.p_max_pu.index, 'names', 'No names')}"
+                    )
+                    logger.info(
+                        f"DEBUG: generators_t.p_max_pu shape: {network.generators_t.p_max_pu.shape}"
+                    )
+                    logger.info(
+                        f"DEBUG: First 3 p_max_pu index values: {network.generators_t.p_max_pu.index[:3].tolist()}"
+                    )
+
+            if hasattr(network, "loads_t") and hasattr(network.loads_t, "p_set"):
                 if not network.loads_t.p_set.empty:
-                    logger.info(
-
-
-                    logger.info(
-
-
+                    logger.info(
+                        f"DEBUG: loads_t.p_set type: {type(network.loads_t.p_set)}"
+                    )
+                    logger.info(
+                        f"DEBUG: loads_t.p_set index type: {type(network.loads_t.p_set.index)}"
+                    )
+                    logger.info(
+                        f"DEBUG: loads_t.p_set index names: {getattr(network.loads_t.p_set.index, 'names', 'No names')}"
+                    )
+                    logger.info(
+                        f"DEBUG: loads_t.p_set shape: {network.loads_t.p_set.shape}"
+                    )
+                    logger.info(
+                        f"DEBUG: First 3 p_set index values: {network.loads_t.p_set.index[:3].tolist()}"
+                    )
+
             if solver_config:
-                result = network.optimize(
-
+                result = network.optimize(
+                    solver_name=actual_solver_name,
+                    multi_investment_periods=True,
+                    extra_functionality=extra_functionality,
+                    **solver_config,
+                )
             else:
-                result = network.optimize(
-
-
+                result = network.optimize(
+                    solver_name=actual_solver_name,
+                    multi_investment_periods=True,
+                    extra_functionality=extra_functionality,
+                )
+
             solve_time = time.time() - start_time
-
+
             # Post-solve debug logging (matches old code)
-            objective_value = getattr(network,
+            objective_value = getattr(network, "objective", None)
             if objective_value is not None:
                 logger.info(f"[DEBUG] POST-SOLVE snapshot_weightings structure:")
-                if hasattr(network,
+                if hasattr(network, "snapshot_weightings"):
                     logger.info(f"[DEBUG] Type: {type(network.snapshot_weightings)}")
-                    logger.info(
+                    logger.info(
+                        f"[DEBUG] Columns: {list(network.snapshot_weightings.columns)}"
+                    )
                     logger.info(f"[DEBUG] Shape: {network.snapshot_weightings.shape}")
-                    logger.info(
-
-
-
+                    logger.info(
+                        f"[DEBUG] Unique values in objective column: {network.snapshot_weightings['objective'].unique()}"
+                    )
+                    logger.info(
+                        f"[DEBUG] Sum of objective column: {network.snapshot_weightings['objective'].sum()}"
+                    )
+
+                if hasattr(network, "investment_period_weightings"):
                     logger.info(f"[DEBUG] investment_period_weightings exists:")
-                    logger.info(
-
-
+                    logger.info(
+                        f"[DEBUG] Type: {type(network.investment_period_weightings)}"
+                    )
+                    logger.info(
+                        f"[DEBUG] Content:\n{network.investment_period_weightings}"
+                    )
+
             # Extract solve results with comprehensive statistics
-            solve_result = self._extract_solve_results(
-
+            solve_result = self._extract_solve_results(
+                network, result, solve_time, actual_solver_name, run_id
+            )
+
             # Calculate comprehensive network statistics (all years combined)
-            if solve_result.get(
+            if solve_result.get("success"):
                 logger.info("Calculating comprehensive network statistics...")
-                network_statistics = self._calculate_comprehensive_network_statistics(
-
-
+                network_statistics = self._calculate_comprehensive_network_statistics(
+                    network, solve_time, actual_solver_name
+                )
+                solve_result["network_statistics"] = network_statistics
+
                 # Calculate year-based statistics for capacity expansion analysis
                 logger.info("Calculating year-based statistics...")
-                year_statistics = self._calculate_statistics_by_year(
-
-
-
-
+                year_statistics = self._calculate_statistics_by_year(
+                    network, solve_time, actual_solver_name
+                )
+                solve_result["year_statistics"] = year_statistics
+                solve_result["year_statistics_available"] = len(year_statistics) > 0
+
+            logger.info(
+                f"Solve completed in {solve_time:.2f} seconds with status: {solve_result['status']}"
+            )
             logger.info(f"PyPSA result object: {result}")
-            logger.info(
-
+            logger.info(
+                f"PyPSA result status: {getattr(result, 'status', 'no status attr')}"
+            )
+            logger.info(
+                f"Network objective: {getattr(network, 'objective', 'no objective')}"
+            )
             logger.info(f"Solve result success: {solve_result.get('success')}")
-
+
             return solve_result
-
+
         except Exception as e:
             solve_time = time.time() - start_time
             logger.error(f"Solve failed after {solve_time:.2f} seconds: {e}")
             logger.exception("Full solve error traceback:")
-
+
             return {
                 "success": False,
                 "status": "failed",
                 "error": str(e),
                 "solve_time": solve_time,
-                "solver_name":
+                "solver_name": (
+                    actual_solver_name
+                    if "actual_solver_name" in locals()
+                    else solver_name
+                ),
                 "run_id": run_id,
-                "objective_value": None
+                "objective_value": None,
             }
-
-    def _get_solver_config(
-
+
+    def _get_solver_config(
+        self,
+        solver_name: str,
+        solver_options: Optional[Dict[str, Any]] = None,
+        custom_solver_config: Optional[Dict[str, Any]] = None,
+    ) -> tuple[str, Optional[Dict[str, Any]]]:
         """
         Get the actual solver name and options for special solver configurations.
-
+
         Args:
             solver_name: The solver name (e.g., 'gurobi (barrier)', 'highs', 'custom')
             solver_options: Optional additional solver options
             custom_solver_config: Optional custom solver configuration for solver_name='custom'
                 Format: {"solver": "actual_solver_name", "solver_options": {...}}
-
+
         Returns:
             Tuple of (actual_solver_name, solver_options_dict)
         """
         # Handle "custom" solver with custom configuration
-        if solver_name ==
+        if solver_name == "custom":
             if not custom_solver_config:
-                raise ValueError(
-
-
-
-
-
-
-
+                raise ValueError(
+                    "custom_solver_config must be provided when solver_name='custom'"
+                )
+
+            if "solver" not in custom_solver_config:
+                raise ValueError(
+                    "custom_solver_config must contain 'solver' key with the actual solver name"
+                )
+
+            actual_solver = custom_solver_config["solver"]
+            custom_options = custom_solver_config.get("solver_options", {})
+
             # Merge with any additional solver_options passed separately
             if solver_options:
-                merged_options = {
+                merged_options = {
+                    "solver_options": {**custom_options, **solver_options}
+                }
             else:
-                merged_options =
-
-
+                merged_options = (
+                    {"solver_options": custom_options} if custom_options else None
+                )
+
+            logger.info(
+                f"Using custom solver configuration: {actual_solver} with options: {custom_options}"
+            )
             return actual_solver, merged_options
-
+
         # Handle "default" solver
-        if solver_name ==
+        if solver_name == "default":
             # Try to read user's default solver preference
             actual_solver = self._resolve_default_solver()
             logger.info(f"Resolved 'default' solver to: {actual_solver}")
             return actual_solver, solver_options
-
+
         # Handle special Gurobi configurations
-        if solver_name ==
+        if solver_name == "gurobi (barrier)":
             gurobi_barrier_options = {
-
-
-
-
-
-
-
-
-
-
-
-
+                "solver_options": {
+                    "Method": 2,  # Barrier
+                    "Crossover": 0,  # Skip crossover
+                    "MIPGap": 0.05,  # 5% gap
+                    "Threads": 0,  # Use all cores (0 = auto)
+                    "Presolve": 2,  # Aggressive presolve
+                    "ConcurrentMIP": 1,  # Parallel root strategies
+                    "BarConvTol": 1e-4,  # Relaxed barrier convergence
+                    "FeasibilityTol": 1e-5,
+                    "OptimalityTol": 1e-5,
+                    "NumericFocus": 1,  # Improve stability
+                    "PreSparsify": 1,
                 }
             }
             # Merge with any additional options
             if solver_options:
                 gurobi_barrier_options.update(solver_options)
-            return
+            return "gurobi", gurobi_barrier_options

-        elif solver_name ==
+        elif solver_name == "gurobi (barrier homogeneous)":
             gurobi_barrier_homogeneous_options = {
-
-
-
-
-
-
-
-
-
-
-
-
-
+                "solver_options": {
+                    "Method": 2,  # Barrier
+                    "Crossover": 0,  # Skip crossover
+                    "MIPGap": 0.05,
+                    "Threads": 0,  # Use all cores (0 = auto)
+                    "Presolve": 2,
+                    "ConcurrentMIP": 1,
+                    "BarConvTol": 1e-4,
+                    "FeasibilityTol": 1e-5,
+                    "OptimalityTol": 1e-5,
+                    "NumericFocus": 1,
+                    "PreSparsify": 1,
+                    "BarHomogeneous": 1,  # Enable homogeneous barrier algorithm
                 }
             }
             if solver_options:
                 gurobi_barrier_homogeneous_options.update(solver_options)
-            return
+            return "gurobi", gurobi_barrier_homogeneous_options

-        elif solver_name ==
+        elif solver_name == "gurobi (barrier+crossover balanced)":
             gurobi_options_balanced = {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                "solver_options": {
+                    "Method": 2,
+                    "Crossover": 1,  # Dual crossover
+                    "MIPGap": 0.01,
+                    "Threads": 0,  # Use all cores (0 = auto)
+                    "Presolve": 2,
+                    "Heuristics": 0.1,
+                    "Cuts": 2,
+                    "ConcurrentMIP": 1,
+                    "BarConvTol": 1e-6,
+                    "FeasibilityTol": 1e-6,
+                    "OptimalityTol": 1e-6,
+                    "NumericFocus": 1,
+                    "PreSparsify": 1,
                 }
             }
             if solver_options:
                 gurobi_options_balanced.update(solver_options)
             logger.info(f"Using Gurobi Barrier+Dual Crossover Balanced configuration")
-            return
+            return "gurobi", gurobi_options_balanced

-        elif solver_name ==
+        elif solver_name == "gurobi (dual simplex)":
             gurobi_dual_options = {
-
-
-
-
+                "solver_options": {
+                    "Method": 1,  # Dual simplex method
+                    "Threads": 0,  # Use all available cores
+                    "Presolve": 2,  # Aggressive presolve
                 }
             }
             if solver_options:
                 gurobi_dual_options.update(solver_options)
-            return
-
+            return "gurobi", gurobi_dual_options
+
         # Handle special Mosek configurations
-        elif solver_name ==
+        elif solver_name == "mosek (default)":
             # No custom options - let Mosek use its default configuration
             mosek_default_options = {
-
-
-
+                "solver_options": {
+                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # MIP relative gap tolerance (5% to match Gurobi)
+                    "MSK_IPAR_MIO_MAX_TIME": 36000,  # Max time 1 hour
                 }
             }
             if solver_options:
-                mosek_default_options[
-                logger.info(
-
-
-
+                mosek_default_options["solver_options"].update(solver_options)
+            logger.info(
+                f"Using Mosek with default configuration (auto-select optimizer) and moderate MIP strategies"
+            )
+            return "mosek", mosek_default_options
+
+        elif solver_name == "mosek (barrier)":
             mosek_barrier_options = {
-
-
-
-
-
+                "solver_options": {
+                    "MSK_IPAR_INTPNT_BASIS": 0,  # Skip crossover (barrier-only) - 0 = MSK_BI_NEVER
+                    "MSK_DPAR_INTPNT_TOL_REL_GAP": 1e-4,  # Match Gurobi barrier tolerance
+                    "MSK_DPAR_INTPNT_TOL_PFEAS": 1e-5,  # Match Gurobi primal feasibility
+                    "MSK_DPAR_INTPNT_TOL_DFEAS": 1e-5,  # Match Gurobi dual feasibility
                     # Removed MSK_DPAR_INTPNT_TOL_INFEAS - was 1000x tighter than other tolerances!
-
-
-
-
-
+                    "MSK_IPAR_NUM_THREADS": 0,  # Use all available cores (0 = auto)
+                    "MSK_IPAR_PRESOLVE_USE": 2,  # Aggressive presolve (match Gurobi Presolve=2)
+                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap
+                    "MSK_IPAR_MIO_ROOT_OPTIMIZER": 4,  # Use interior-point for MIP root
+                    "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hour
                 }
             }
             if solver_options:
-                mosek_barrier_options[
-                logger.info(
-
-
-
+                mosek_barrier_options["solver_options"].update(solver_options)
+            logger.info(
+                f"Using Mosek Barrier with aggressive presolve and relaxed tolerances"
+            )
+            return "mosek", mosek_barrier_options
+
+        elif solver_name == "mosek (barrier+crossover)":
             mosek_barrier_crossover_options = {
-
-
-
-
-
-
-
-
-
+                "solver_options": {
+                    "MSK_IPAR_INTPNT_BASIS": 1,  # Always crossover (1 = MSK_BI_ALWAYS)
+                    "MSK_DPAR_INTPNT_TOL_REL_GAP": 1e-4,  # Match Gurobi barrier tolerance (was 1e-6)
+                    "MSK_DPAR_INTPNT_TOL_PFEAS": 1e-5,  # Match Gurobi (was 1e-6)
+                    "MSK_DPAR_INTPNT_TOL_DFEAS": 1e-5,  # Match Gurobi (was 1e-6)
+                    "MSK_IPAR_NUM_THREADS": 0,  # Use all available cores (0 = auto)
+                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap (was 1e-6)
+                    "MSK_IPAR_MIO_ROOT_OPTIMIZER": 4,  # Use interior-point for MIP root
+                    "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hour (safety limit)
                 }
             }
             if solver_options:
-                mosek_barrier_crossover_options[
-                logger.info(
-
-
-
-            mosek_dual_options = {
-                'solver_options': {
-                    'MSK_IPAR_NUM_THREADS': 0,  # Use all available cores (0 = automatic)
-                    'MSK_IPAR_PRESOLVE_USE': 1,  # Force presolve
-                    'MSK_DPAR_MIO_REL_GAP_CONST': 0.05,  # Match Gurobi 5% MIP gap (was 1e-6)
-                    'MSK_IPAR_MIO_ROOT_OPTIMIZER': 1,  # Use dual simplex for MIP root
-                    'MSK_DPAR_MIO_MAX_TIME': 36000,  # Max time 10 hour (safety limit)
+                mosek_barrier_crossover_options["solver_options"].update(solver_options)
+            logger.info(
+                f"Using Mosek Barrier+Crossover configuration with Gurobi-matched tolerances and moderate MIP strategies"
+            )
+            return "mosek", mosek_barrier_crossover_options

+        elif solver_name == "mosek (dual simplex)":
+            mosek_dual_options = {
+                "solver_options": {
+                    "MSK_IPAR_NUM_THREADS": 0,  # Use all available cores (0 = automatic)
+                    "MSK_IPAR_PRESOLVE_USE": 1,  # Force presolve
+                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap (was 1e-6)
+                    "MSK_IPAR_MIO_ROOT_OPTIMIZER": 1,  # Use dual simplex for MIP root
+                    "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hour (safety limit)
                 }
             }
             if solver_options:
-                mosek_dual_options[
-                logger.info(
-
-
+                mosek_dual_options["solver_options"].update(solver_options)
+            logger.info(
+                f"Using Mosek Dual Simplex configuration with Gurobi-matched tolerances and moderate MIP strategies"
+            )
+            return "mosek", mosek_dual_options
+
         # Check if this is a known valid solver name
-        elif solver_name ==
+        elif solver_name == "mosek":
             # Add default MILP-friendly settings for plain Mosek
             mosek_defaults = {
-
-
-
-
+                "solver_options": {
+                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap (was 1e-4)
+                    "MSK_IPAR_MIO_MAX_TIME": 36000,  # Max time 1 hour
+                    "MSK_IPAR_NUM_THREADS": 0,  # Use all cores (0 = auto)
                 }
             }
             if solver_options:
-                mosek_defaults[
-                logger.info(
+                mosek_defaults["solver_options"].update(solver_options)
+            logger.info(
+                f"Using Mosek with barrier method for MIP (interior-point for root/nodes)"
+            )
             return solver_name, mosek_defaults
-
-        elif solver_name ==
+
+        elif solver_name == "gurobi":
             # Add default MILP-friendly settings for plain Gurobi (for consistency)
             gurobi_defaults = {
-
-
-
-
-
+                "solver_options": {
+                    "MIPGap": 1e-4,  # 0.01% gap
+                    "TimeLimit": 3600,  # 1 hour
+                    "Threads": 0,  # Use all cores
+                    "OutputFlag": 1,  # Enable output
                 }
             }
             if solver_options:
-                gurobi_defaults[
+                gurobi_defaults["solver_options"].update(solver_options)
             logger.info(f"Using Gurobi with default MILP-friendly settings")
             return solver_name, gurobi_defaults
-
+
         # Handle special COPT configurations
-        elif solver_name ==
+        elif solver_name == "copt (barrier)":
             copt_barrier_options = {
-
-
-
-
-
-
-
-
-
-
+                "solver_options": {
+                    "LpMethod": 2,  # Barrier method
+                    "Crossover": 0,  # Skip crossover for speed
+                    "RelGap": 0.05,  # 5% MIP gap (match Gurobi)
+                    "TimeLimit": 7200,  # 1 hour time limit
+                    "Threads": -1,  # 4 threads (memory-conscious)
+                    "Presolve": 3,  # Aggressive presolve
+                    "Scaling": 1,  # Enable scaling
+                    "FeasTol": 1e-5,  # Match Gurobi feasibility
+                    "DualTol": 1e-5,  # Match Gurobi dual tolerance
                     # MIP performance settings
-
-
-
+                    "CutLevel": 2,  # Normal cut generation
+                    "HeurLevel": 3,  # Aggressive heuristics
+                    "StrongBranching": 1,  # Fast strong branching
                 }
             }
             if solver_options:
-                copt_barrier_options[
-                logger.info(
-
-
-
+                copt_barrier_options["solver_options"].update(solver_options)
+            logger.info(
+                f"Using COPT Barrier configuration (fast interior-point method)"
+            )
+            return "copt", copt_barrier_options
+
+        elif solver_name == "copt (barrier homogeneous)":
             copt_barrier_homogeneous_options = {
-
-
-
-
-
-
-
-
-
-
-
+                "solver_options": {
+                    "LpMethod": 2,  # Barrier method
+                    "Crossover": 0,  # Skip crossover
+                    "BarHomogeneous": 1,  # Use homogeneous self-dual form
+                    "RelGap": 0.05,  # 5% MIP gap
+                    "TimeLimit": 3600,  # 1 hour
+                    "Threads": -1,  # 4 threads (memory-conscious)
+                    "Presolve": 3,  # Aggressive presolve
+                    "Scaling": 1,  # Enable scaling
+                    "FeasTol": 1e-5,
+                    "DualTol": 1e-5,
                     # MIP performance settings
-
-
-
+                    "CutLevel": 2,  # Normal cuts
+                    "HeurLevel": 3,  # Aggressive heuristics
+                    "StrongBranching": 1,  # Fast strong branching
                 }
             }
             if solver_options:
-                copt_barrier_homogeneous_options[
+                copt_barrier_homogeneous_options["solver_options"].update(
+                    solver_options
+                )
             logger.info(f"Using COPT Barrier Homogeneous configuration")
-            return
-
-        elif solver_name ==
+            return "copt", copt_barrier_homogeneous_options
+
+        elif solver_name == "copt (barrier+crossover)":
             copt_barrier_crossover_options = {
-
-
-
-
-
-
-
-
-
-
+                "solver_options": {
+                    "LpMethod": 2,  # Barrier method
+                    "Crossover": 1,  # Enable crossover for better solutions
+                    "RelGap": 0.05,  # 5% MIP gap (relaxed for faster solves)
+                    "TimeLimit": 36000,  # 10 hour
+                    "Threads": -1,  # Use all cores
+                    "Presolve": 2,  # Aggressive presolve
+                    "Scaling": 1,  # Enable scaling
+                    "FeasTol": 1e-4,  # Tighter feasibility
+                    "DualTol": 1e-4,  # Tighter dual tolerance
                 }
             }
             if solver_options:
-                copt_barrier_crossover_options[
-                logger.info(
-
-
-
+                copt_barrier_crossover_options["solver_options"].update(solver_options)
+            logger.info(
+                f"Using COPT Barrier+Crossover configuration (balanced performance)"
+            )
+            return "copt", copt_barrier_crossover_options
+
+        elif solver_name == "copt (dual simplex)":
             copt_dual_simplex_options = {
-
-
-
-
-
-
-
-
-
+                "solver_options": {
+                    "LpMethod": 1,  # Dual simplex method
+                    "RelGap": 0.05,  # 5% MIP gap
+                    "TimeLimit": 3600,  # 1 hour
+                    "Threads": -1,  # Use all cores
+                    "Presolve": 3,  # Aggressive presolve
+                    "Scaling": 1,  # Enable scaling
+                    "FeasTol": 1e-6,
+                    "DualTol": 1e-6,
                     # MIP performance settings
-
-
-
+                    "CutLevel": 2,  # Normal cuts
+                    "HeurLevel": 2,  # Normal heuristics
+                    "StrongBranching": 1,  # Fast strong branching
                 }
             }
             if solver_options:
-                copt_dual_simplex_options[
+                copt_dual_simplex_options["solver_options"].update(solver_options)
             logger.info(f"Using COPT Dual Simplex configuration (robust method)")
-            return
-
-        elif solver_name ==
+            return "copt", copt_dual_simplex_options
+
+        elif solver_name == "copt (concurrent)":
             copt_concurrent_options = {
-
-
-
-
-
-
-
-
-
+                "solver_options": {
+                    "LpMethod": 4,  # Concurrent (simplex + barrier)
+                    "RelGap": 0.05,  # 5% MIP gap
+                    "TimeLimit": 3600,  # 1 hour
+                    "Threads": -1,  # Use all cores
+                    "Presolve": 3,  # Aggressive presolve
+                    "Scaling": 1,  # Enable scaling
+                    "FeasTol": 1e-5,
+                    "DualTol": 1e-5,
                     # MIP performance settings
-
-
-
+                    "CutLevel": 2,  # Normal cuts
+                    "HeurLevel": 3,  # Aggressive heuristics
+                    "StrongBranching": 1,  # Fast strong branching
                 }
             }
             if solver_options:
-                copt_concurrent_options[
-                logger.info(
-
-
-
+                copt_concurrent_options["solver_options"].update(solver_options)
+            logger.info(
+                f"Using COPT Concurrent configuration (parallel simplex + barrier)"
+            )
+            return "copt", copt_concurrent_options
+
+        elif solver_name in ["highs", "cplex", "glpk", "cbc", "scip", "copt"]:
             return solver_name, solver_options
-
+
         else:
             # Unknown solver name - log warning and fall back to highs
-            logger.warning(
-
-
-
+            logger.warning(
+                f"Unknown solver name '{solver_name}' - falling back to 'highs'"
+            )
+            return "highs", solver_options
+
     def _detect_constraint_type(self, constraint_code: str) -> str:
         """
         Detect if constraint is network-modification or model-constraint type.
-
+
         Args:
            constraint_code: The constraint code to analyze
-
+
         Returns:
            "model_constraint" or "network_modification"
        """
         # Type 2 indicators (model constraints) - need access to optimization model
         model_indicators = [
-
-
-
-
-
-
-
-
-
-
+            "n.optimize.create_model()",
+            "m.variables",
+            "m.add_constraints",
+            "gen_p =",
+            "constraint_expr =",
+            "LinearExpression",
+            "linopy",
+            "Generator-p",
+            "lhs <=",
+            "constraint_expr =",
         ]
-
+
         # Type 1 indicators (network modifications) - modify network directly
         network_indicators = [
-
-
-
-
-
-
-
-
+            "n.generators.loc",
+            "n.add(",
+            "n.buses.",
+            "n.lines.",
+            "network.generators.loc",
+            "network.add(",
+            "network.buses.",
+            "network.lines.",
         ]
-
+
         # Check for model constraint indicators first (more specific)
         if any(indicator in constraint_code for indicator in model_indicators):
             return "model_constraint"
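All of the presets added in this hunk reduce to an (actual_solver_name, options) tuple, so a preset name is shorthand for a concrete solver plus an options dict. Reading the new code, the dual-simplex preset should resolve as sketched below (expected behavior inferred from the diff, not package test output):

actual, opts = NetworkSolver()._get_solver_config("gurobi (dual simplex)")
# actual == "gurobi"
# opts == {"solver_options": {"Method": 1, "Threads": 0, "Presolve": 2}}

An unrecognized name falls through to the final else branch and comes back as ("highs", solver_options).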
@@ -661,170 +797,214 @@
         # Default to network_modification for safety (existing behavior)
         return "network_modification"

-    def _create_extra_functionality(
+    def _create_extra_functionality(
+        self, optimization_constraints: list, constraint_applicator
+    ) -> callable:
         """
         Create extra_functionality function for optimization-time constraints.
-
+
         This matches the old PyPSA solver's approach to applying constraints during optimization.
-
+
         Args:
             optimization_constraints: List of optimization constraint dictionaries
             constraint_applicator: ConstraintApplicator instance
-
+
         Returns:
             Function that can be passed to network.optimize(extra_functionality=...)
         """
+
         def extra_functionality(network, snapshots):
             """Apply optimization constraints during solve - matches old code structure"""
             try:
-                logger.info(
-
+                logger.info(
+                    f"Applying {len(optimization_constraints)} optimization constraints during solve"
+                )
+
                 # Apply each constraint in priority order
-                sorted_constraints = sorted(
-
+                sorted_constraints = sorted(
+                    optimization_constraints, key=lambda x: x.get("priority", 0)
+                )
+
                 for constraint in sorted_constraints:
                     try:
-                        constraint_applicator.apply_optimization_constraint(
+                        constraint_applicator.apply_optimization_constraint(
+                            network, snapshots, constraint
+                        )
                     except Exception as e:
-                        logger.error(
+                        logger.error(
+                            f"Failed to apply optimization constraint {constraint.get('name', 'unknown')}: {e}"
+                        )
                         continue
-
+
                 logger.info("Optimization constraints applied successfully")
-
+
             except Exception as e:
                 logger.error(f"Failed to apply optimization constraints: {e}")
                 # Don't re-raise - let optimization continue
-
+
         return extra_functionality
-
-    def _set_snapshot_weightings_after_multiperiod(
-
+
+    def _set_snapshot_weightings_after_multiperiod(
+        self, conn, network: "pypsa.Network"
+    ):
+        """Set snapshot weightings AFTER multi-period setup - matches old code approach (single network per database)."""
         try:
             from pyconvexity.models import get_network_time_periods, get_network_info
-
-            time_periods = get_network_time_periods(conn
+
+            time_periods = get_network_time_periods(conn)
             if time_periods and len(network.snapshots) > 0:
-                logger.info(
-
-
-
-
+                logger.info(
+                    f"Setting snapshot weightings AFTER multi-period setup for {len(time_periods)} time periods"
+                )
+
+                # Get network info to determine time interval
+                network_info = get_network_info(conn)
+                time_interval = network_info.get("time_interval", "1H")
                 weight = self._parse_time_interval(time_interval)
-
+
                 if weight is None:
                     weight = 1.0
-                    logger.warning(
-
-
-
+                    logger.warning(
+                        f"Could not parse time interval '{time_interval}', using default weight of 1.0"
+                    )
+
+                logger.info(
+                    f"Parsed time interval '{time_interval}' -> weight = {weight}"
+                )
+
                 # Create weightings array - all snapshots get the same weight for this time resolution
                 weightings = [weight] * len(time_periods)
-
+
                 if len(weightings) == len(network.snapshots):
                     # Set all three columns like the old code - critical for proper objective calculation
-                    network.snapshot_weightings.loc[:,
-                    network.snapshot_weightings.loc[:,
-                    network.snapshot_weightings.loc[:,
-                    logger.info(
-
+                    network.snapshot_weightings.loc[:, "objective"] = weightings
+                    network.snapshot_weightings.loc[:, "generators"] = weightings
+                    network.snapshot_weightings.loc[:, "stores"] = weightings
+                    logger.info(
+                        f"Set snapshot weightings AFTER multi-period setup: objective, generators, stores columns"
+                    )
+
                     # Debug logging like old code
-                    logger.info(
-
-
+                    logger.info(
+                        f"Snapshot weightings shape: {network.snapshot_weightings.shape}"
+                    )
+                    logger.info(
+                        f"Unique values in objective column: {network.snapshot_weightings['objective'].unique()}"
+                    )
+                    logger.info(
+                        f"Sum of objective column: {network.snapshot_weightings['objective'].sum()}"
+                    )
                     logger.info(f"Weight per snapshot: {weight} hours")
                 else:
-                    logger.warning(
+                    logger.warning(
+                        f"Mismatch between weightings ({len(weightings)}) and snapshots ({len(network.snapshots)})"
+                    )
         except Exception as e:
-            logger.warning(
+            logger.warning(
+                f"Failed to set snapshot weightings after multi-period setup: {e}"
+            )
             logger.exception("Full traceback:")
-
+
     def _parse_time_interval(self, time_interval: str) -> Optional[float]:
         """Parse time interval string to hours - handles multiple formats."""
         if not time_interval:
             return None
-
+
         try:
             # Clean up the string
             interval = time_interval.strip()
-
+
             # Handle ISO 8601 duration format (PT3H, PT30M, etc.)
-            if interval.startswith(
+            if interval.startswith("PT") and interval.endswith("H"):
                 # Extract hours (e.g., 'PT3H' -> 3.0)
                 hours_str = interval[2:-1]  # Remove 'PT' and 'H'
                 return float(hours_str)
-            elif interval.startswith(
+            elif interval.startswith("PT") and interval.endswith("M"):
                 # Extract minutes (e.g., 'PT30M' -> 0.5)
                 minutes_str = interval[2:-1]  # Remove 'PT' and 'M'
                 return float(minutes_str) / 60.0
-            elif interval.startswith(
+            elif interval.startswith("PT") and interval.endswith("S"):
                 # Extract seconds (e.g., 'PT3600S' -> 1.0)
                 seconds_str = interval[2:-1]  # Remove 'PT' and 'S'
                 return float(seconds_str) / 3600.0
-
+
             # Handle simple frequency strings (3H, 2D, etc.)
-            elif interval.endswith(
+            elif interval.endswith("H") or interval.endswith("h"):
                 hours_str = interval[:-1]
                 return float(hours_str) if hours_str else 1.0
-            elif interval.endswith(
+            elif interval.endswith("D") or interval.endswith("d"):
                 days_str = interval[:-1]
                 return float(days_str) * 24 if days_str else 24.0
-            elif interval.endswith(
+            elif interval.endswith("M") or interval.endswith("m"):
                 minutes_str = interval[:-1]
-                return float(minutes_str) / 60.0 if minutes_str else 1.0/60.0
-            elif interval.endswith(
+                return float(minutes_str) / 60.0 if minutes_str else 1.0 / 60.0
+            elif interval.endswith("S") or interval.endswith("s"):
                 seconds_str = interval[:-1]
-                return float(seconds_str) / 3600.0 if seconds_str else 1.0/3600.0
-
+                return float(seconds_str) / 3600.0 if seconds_str else 1.0 / 3600.0
+
             # Try to parse as plain number (assume hours)
             else:
                 return float(interval)
-
+
         except (ValueError, TypeError) as e:
             logger.warning(f"Could not parse time interval '{time_interval}': {e}")
             return None
-
-    def _calculate_investment_weightings(
+
+    def _calculate_investment_weightings(
+        self, network: "pypsa.Network", discount_rate: float
+    ) -> None:
         """
         Calculate investment period weightings using discount rate - matching old PyPSA solver exactly.
-
+
         Args:
             network: PyPSA Network object
             discount_rate: Discount rate for NPV calculations
         """
         try:
             import pandas as pd
-
-            if
+
+            if (
+                not hasattr(network, "investment_periods")
+                or len(network.investment_periods) == 0
+            ):
                 return
-
+
             years = network.investment_periods
             # Convert pandas Index to list for easier handling
-            years_list = years.tolist() if hasattr(years,
-
-            logger.info(
-
+            years_list = years.tolist() if hasattr(years, "tolist") else list(years)
+
+            logger.info(
+                f"Calculating investment weightings for periods: {years_list} with discount rate: {discount_rate}"
+            )
+
             # For single year, use simple weighting of 1.0
             if len(years_list) == 1:
                 # Single year case
-                network.investment_period_weightings = pd.DataFrame(
-
-
-
-
+                network.investment_period_weightings = pd.DataFrame(
+                    {
+                        "objective": pd.Series({years_list[0]: 1.0}),
+                        "years": pd.Series({years_list[0]: 1}),
+                    }
+                )
+                logger.info(
+                    f"Set single-year investment period weightings for year {years_list[0]}"
+                )
             else:
                 # Multi-year case - EXACTLY match old code logic
                 # Get unique years from the network snapshots to determine period lengths
-                if hasattr(network.snapshots,
+                if hasattr(network.snapshots, "year"):
                     snapshot_years = sorted(network.snapshots.year.unique())
-                elif hasattr(network.snapshots,
+                elif hasattr(network.snapshots, "get_level_values"):
                     # MultiIndex case - get years from 'period' level
-                    snapshot_years = sorted(
+                    snapshot_years = sorted(
+                        network.snapshots.get_level_values("period").unique()
+                    )
                 else:
                     # Fallback: use investment periods as years
                     snapshot_years = years_list
-
+
                 logger.info(f"Snapshot years found: {snapshot_years}")
-
+
                 # Calculate years per period - EXACTLY matching old code
                 years_diff = []
                 for i, year in enumerate(years_list):
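_parse_time_interval() in the hunk above normalizes several interval spellings to a weight in hours. The mappings below are read directly off the new code (a sketch of expected behavior, not package test output):

solver = NetworkSolver()
assert solver._parse_time_interval("PT3H") == 3.0    # ISO 8601 hours
assert solver._parse_time_interval("PT30M") == 0.5   # ISO 8601 minutes
assert solver._parse_time_interval("3H") == 3.0      # simple frequency string
assert solver._parse_time_interval("2D") == 48.0     # days converted to hours
assert solver._parse_time_interval("0.5") == 0.5     # plain number taken as hours
assert solver._parse_time_interval("junk") is None   # unparseable -> None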
@@ -836,7 +1016,9 @@
                         # For the last period, calculate based on snapshot coverage
                         if snapshot_years:
                             # Find the last snapshot year that's >= current period year
-                            last_snapshot_year = max(
+                            last_snapshot_year = max(
+                                [y for y in snapshot_years if y >= year]
+                            )
                             period_years = last_snapshot_year - year + 1
                         else:
                             # Fallback: assume same length as previous period or 1
@@ -844,71 +1026,83 @@ class NetworkSolver:
                             period_years = years_diff[-1]  # Same as previous period
                         else:
                             period_years = 1
-
+
                 years_diff.append(period_years)
                 logger.info(f"Period {year}: {period_years} years")
-
+
             # Create weightings DataFrame with years column
-            weightings_df = pd.DataFrame({
-                'years': pd.Series(years_diff, index=years_list)
-            })
-
+            weightings_df = pd.DataFrame(
+                {"years": pd.Series(years_diff, index=years_list)}
+            )
+
             # Calculate objective weightings with discount rate - EXACTLY matching old code
             r = discount_rate
             T = 0  # Cumulative time tracker
-
+
             logger.info(f"Calculating discount factors with rate {r}:")
             for period, nyears in weightings_df.years.items():
                 # Calculate discount factors for each year in this period
                 discounts = [(1 / (1 + r) ** t) for t in range(T, T + nyears)]
                 period_weighting = sum(discounts)
                 weightings_df.at[period, "objective"] = period_weighting
-
-                logger.info(f"  Period {period}: years {T} to {T + nyears - 1}, discounts={[f'{d:.4f}' for d in discounts]}, sum={period_weighting:.4f}")
+
+                logger.info(
+                    f"  Period {period}: years {T} to {T + nyears - 1}, discounts={[f'{d:.4f}' for d in discounts]}, sum={period_weighting:.4f}"
+                )
                 T += nyears  # Update cumulative time
-
+
             network.investment_period_weightings = weightings_df
             logger.info(f"Final investment period weightings:")
             logger.info(f"  Years: {weightings_df['years'].to_dict()}")
             logger.info(f"  Objective: {weightings_df['objective'].to_dict()}")
-
+
         except Exception as e:
             logger.error(f"Failed to calculate investment weightings: {e}")
             logger.exception("Full traceback:")
-
-
-    def _extract_solve_results(self, network: 'pypsa.Network', result: Any, solve_time: float, solver_name: str, run_id: str) -> Dict[str, Any]:
+
+    def _extract_solve_results(
+        self,
+        network: "pypsa.Network",
+        result: Any,
+        solve_time: float,
+        solver_name: str,
+        run_id: str,
+    ) -> Dict[str, Any]:
         """
         Extract solve results from PyPSA network.
-
+
         Args:
             network: Solved PyPSA Network object
             result: PyPSA solve result
             solve_time: Time taken to solve
            solver_name: Name of solver used
             run_id: Unique run identifier
-
+
         Returns:
             Dictionary with solve results and metadata
         """
         try:
             # Extract basic solve information
-            status = getattr(result, 'status', 'unknown')
-            objective_value = getattr(network, 'objective', None)
-
+            status = getattr(result, "status", "unknown")
+            objective_value = getattr(network, "objective", None)
+
             # Debug logging
-            logger.info(f"Raw PyPSA result attributes: {dir(result) if result else 'None'}")
-            if hasattr(result, 'termination_condition'):
+            logger.info(
+                f"Raw PyPSA result attributes: {dir(result) if result else 'None'}"
+            )
+            if hasattr(result, "termination_condition"):
                 logger.info(f"Termination condition: {result.termination_condition}")
-            if hasattr(result, 'solver'):
+            if hasattr(result, "solver"):
                 logger.info(f"Solver info: {result.solver}")
-
+
             # Convert PyPSA result to dictionary format
             result_dict = self._convert_pypsa_result_to_dict(result)
-
+
             # Determine success based on multiple criteria
-            success = self._determine_solve_success(result, network, status, objective_value)
-
+            success = self._determine_solve_success(
+                result, network, status, objective_value
+            )
+
             solve_result = {
                 "success": success,
                 "status": status,
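For reference, the objective weighting computed in the loop above is the sum of per-year discount factors 1/(1+r)^t, with t counted cumulatively across periods. A minimal sketch with hypothetical numbers (the rate and period lengths are illustrative, not values from the package):

    r = 0.05
    T = 0
    for period, nyears in [(2030, 2), (2032, 2)]:
        discounts = [1 / (1 + r) ** t for t in range(T, T + nyears)]
        print(period, round(sum(discounts), 4))  # 2030 -> 1.9524, 2032 -> 1.7709
        T += nyears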
@@ -923,16 +1117,16 @@ class NetworkSolver:
                 "num_loads": len(network.loads),
                 "num_lines": len(network.lines),
                 "num_links": len(network.links),
-                "num_snapshots": len(network.snapshots)
+                "num_snapshots": len(network.snapshots),
             }
-
+
             # Add multi-period information if available
-            if hasattr(network, '_available_years') and network._available_years:
+            if hasattr(network, "_available_years") and network._available_years:
                 solve_result["years"] = network._available_years
                 solve_result["multi_period"] = len(network._available_years) > 1
-
+
             return solve_result
-
+
         except Exception as e:
             logger.error(f"Failed to extract solve results: {e}")
             return {
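Taken together, `_extract_solve_results` returns a flat dictionary. An illustrative shape, restricted to the keys visible in these hunks (all values hypothetical):

    {
        "success": True,
        "status": "optimal",
        "num_loads": 12,
        "num_lines": 8,
        "num_links": 3,
        "num_snapshots": 8760,
        "years": [2030, 2040],  # only set when network._available_years exists
        "multi_period": True,
    }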
@@ -942,91 +1136,109 @@ class NetworkSolver:
                 "solve_time": solve_time,
                 "solver_name": solver_name,
                 "run_id": run_id,
-                "objective_value": None
+                "objective_value": None,
             }
-
-    def _determine_solve_success(self, result: Any, network: 'pypsa.Network', status: str, objective_value: Optional[float]) -> bool:
+
+    def _determine_solve_success(
+        self,
+        result: Any,
+        network: "pypsa.Network",
+        status: str,
+        objective_value: Optional[float],
+    ) -> bool:
         """
         Determine if solve was successful based on multiple criteria.
-
+
         PyPSA sometimes returns status='unknown' even for successful solves,
         so we need to check multiple indicators.
         """
         try:
             # Check explicit status first
-            if status in ['optimal', 'feasible']:
+            if status in ["optimal", "feasible"]:
                 logger.info(f"Success determined by status: {status}")
                 return True
-
+
             # Check termination condition
-            if hasattr(result, 'termination_condition'):
+            if hasattr(result, "termination_condition"):
                 term_condition = str(result.termination_condition).lower()
-                if 'optimal' in term_condition:
-                    logger.info(f"Success determined by termination condition: {result.termination_condition}")
+                if "optimal" in term_condition:
+                    logger.info(
+                        f"Success determined by termination condition: {result.termination_condition}"
+                    )
                     return True
-
+
             # Check if we have a valid objective value
-            if objective_value is not None and not (objective_value == 0 and status == 'unknown'):
-                logger.info(f"Success determined by valid objective value: {objective_value}")
+            if objective_value is not None and not (
+                objective_value == 0 and status == "unknown"
+            ):
+                logger.info(
+                    f"Success determined by valid objective value: {objective_value}"
+                )
                 return True
-
+
             # Check solver-specific success indicators
-            if hasattr(result, 'solver'):
+            if hasattr(result, "solver"):
                 solver_info = result.solver
-                if hasattr(solver_info, 'termination_condition'):
+                if hasattr(solver_info, "termination_condition"):
                     term_condition = str(solver_info.termination_condition).lower()
-                    if 'optimal' in term_condition:
-                        logger.info(f"Success determined by solver termination condition: {solver_info.termination_condition}")
+                    if "optimal" in term_condition:
+                        logger.info(
+                            f"Success determined by solver termination condition: {solver_info.termination_condition}"
+                        )
                         return True
-
-            logger.warning(f"Could not determine success: status={status}, objective={objective_value}, result_attrs={dir(result) if result else 'None'}")
+
+            logger.warning(
+                f"Could not determine success: status={status}, objective={objective_value}, result_attrs={dir(result) if result else 'None'}"
+            )
             return False
-
+
         except Exception as e:
             logger.error(f"Error determining solve success: {e}")
             return False
-
+
     def _convert_pypsa_result_to_dict(self, result) -> Dict[str, Any]:
         """
         Convert PyPSA result object to dictionary.
-
+
         Args:
             result: PyPSA solve result object
-
+
         Returns:
             Dictionary representation of the result
         """
         try:
             if result is None:
                 return {"status": "no_result"}
-
+
             result_dict = {}
-
+
             # Extract common attributes
-            for attr in ['status', 'success', 'termination_condition', 'solver']:
+            for attr in ["status", "success", "termination_condition", "solver"]:
                 if hasattr(result, attr):
                     value = getattr(result, attr)
                     # Convert to serializable format
-                    if hasattr(value, '__dict__'):
+                    if hasattr(value, "__dict__"):
                         result_dict[attr] = str(value)
                     else:
                         result_dict[attr] = value
-
+
             # Handle solver-specific information
-            if hasattr(result, 'solver_results'):
-                solver_results = getattr(result, 'solver_results')
-                if hasattr(solver_results, '__dict__'):
-                    result_dict['solver_results'] = str(solver_results)
+            if hasattr(result, "solver_results"):
+                solver_results = getattr(result, "solver_results")
+                if hasattr(solver_results, "__dict__"):
+                    result_dict["solver_results"] = str(solver_results)
                 else:
-                    result_dict['solver_results'] = solver_results
-
+                    result_dict["solver_results"] = solver_results
+
             return result_dict
-
+
         except Exception as e:
             logger.warning(f"Failed to convert PyPSA result to dict: {e}")
             return {"status": "conversion_failed", "error": str(e)}
-
-    def _calculate_comprehensive_network_statistics(self, network: 'pypsa.Network', solve_time: float, solver_name: str) -> Dict[str, Any]:
+
+    def _calculate_comprehensive_network_statistics(
+        self, network: "pypsa.Network", solve_time: float, solver_name: str
+    ) -> Dict[str, Any]:
         """Calculate comprehensive network statistics including PyPSA statistics and custom metrics"""
         try:
             # Initialize statistics structure
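`_determine_solve_success` is deliberately permissive: explicit status first, then the result's termination condition, then a non-trivial objective value, then the solver object's own termination condition. A sketch of how the second rung rescues a status of "unknown" (the stub class is hypothetical, standing in for a PyPSA result object):

    class StubResult:
        status = "unknown"
        termination_condition = "optimal"

    # The status check fails, but "optimal" in
    # str(StubResult.termination_condition).lower() succeeds,
    # so the solve would still be reported as successful.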
@@ -1035,75 +1247,106 @@ class NetworkSolver:
                 "pypsa_statistics": {},
                 "custom_statistics": {},
                 "runtime_info": {},
-                "solver_info": {}
+                "solver_info": {},
             }
-
+
             # Core summary statistics
             total_generation = 0
             total_demand = 0
             unserved_energy = 0
-
+
             # Calculate generation statistics
-            if hasattr(network, 'generators_t') and hasattr(network.generators_t, 'p'):
+            if hasattr(network, "generators_t") and hasattr(network.generators_t, "p"):
                 # Apply snapshot weightings to convert MW to MWh
                 weightings = network.snapshot_weightings
                 if isinstance(weightings, pd.DataFrame):
-                    if 'objective' in weightings.columns:
-                        weighting_values = weightings['objective'].values
+                    if "objective" in weightings.columns:
+                        weighting_values = weightings["objective"].values
                     else:
                         weighting_values = weightings.iloc[:, 0].values
                 else:
                     weighting_values = weightings.values
-
-                total_generation = float(
-                    (network.generators_t.p.values * weighting_values[:, None]).sum())
+
+                total_generation = float(
+                    (network.generators_t.p.values * weighting_values[:, None]).sum()
+                )
+
                 # Calculate unserved energy from UNMET_LOAD generators
-                if hasattr(network, 'generators') and hasattr(network, '_component_type_map'):
-                    unmet_load_gen_names = [name for name, comp_type in network._component_type_map.items()
-                                            if comp_type == 'UNMET_LOAD']
-
+                if hasattr(network, "generators") and hasattr(
+                    network, "_component_type_map"
+                ):
+                    unmet_load_gen_names = [
+                        name
+                        for name, comp_type in network._component_type_map.items()
+                        if comp_type == "UNMET_LOAD"
+                    ]
+
                 for gen_name in unmet_load_gen_names:
                     if gen_name in network.generators_t.p.columns:
-                        gen_output = float((network.generators_t.p[gen_name] * weighting_values).sum())
+                        gen_output = float(
+                            (
+                                network.generators_t.p[gen_name] * weighting_values
+                            ).sum()
+                        )
                         unserved_energy += gen_output
-
+
             # Calculate demand statistics
-            if hasattr(network, 'loads_t') and hasattr(network.loads_t, 'p'):
+            if hasattr(network, "loads_t") and hasattr(network.loads_t, "p"):
                 weightings = network.snapshot_weightings
                 if isinstance(weightings, pd.DataFrame):
-                    if 'objective' in weightings.columns:
-                        weighting_values = weightings['objective'].values
+                    if "objective" in weightings.columns:
+                        weighting_values = weightings["objective"].values
                     else:
                         weighting_values = weightings.iloc[:, 0].values
                 else:
                     weighting_values = weightings.values
-
-                total_demand = float(
-                    (network.loads_t.p.values * weighting_values[:, None]).sum())
+
+                total_demand = float(
+                    (network.loads_t.p.values * weighting_values[:, None]).sum()
+                )
+
             statistics["core_summary"] = {
                 "total_generation_mwh": total_generation,
                 "total_demand_mwh": total_demand,
-                "total_cost": float(network.objective) if hasattr(network, 'objective') else None,
-                "load_factor": (total_demand / (total_generation + 1e-6)) if total_generation > 0 else 0,
-                "unserved_energy_mwh": unserved_energy
+                "total_cost": (
+                    float(network.objective) if hasattr(network, "objective") else None
+                ),
+                "load_factor": (
+                    (total_demand / (total_generation + 1e-6))
+                    if total_generation > 0
+                    else 0
+                ),
+                "unserved_energy_mwh": unserved_energy,
             }
-
+
             # Calculate PyPSA statistics
             try:
                 pypsa_stats = network.statistics()
                 if pypsa_stats is not None and not pypsa_stats.empty:
-                    statistics["pypsa_statistics"] = self._convert_pypsa_result_to_dict(pypsa_stats)
+                    statistics["pypsa_statistics"] = self._convert_pypsa_result_to_dict(
+                        pypsa_stats
+                    )
                 else:
                     statistics["pypsa_statistics"] = {}
             except Exception as e:
                 logger.error(f"Failed to calculate PyPSA statistics: {e}")
                 statistics["pypsa_statistics"] = {}
-
+
             # Custom statistics - calculate detailed breakdowns
-            total_cost = float(network.objective) if hasattr(network, 'objective') else 0.0
-            avg_price = (total_cost / (total_generation + 1e-6)) if total_generation > 0 else None
-            unmet_load_percentage = (unserved_energy / (total_demand + 1e-6)) * 100 if total_demand > 0 else 0
-
+            total_cost = (
+                float(network.objective) if hasattr(network, "objective") else 0.0
+            )
+            avg_price = (
+                (total_cost / (total_generation + 1e-6))
+                if total_generation > 0
+                else None
+            )
+            unmet_load_percentage = (
+                (unserved_energy / (total_demand + 1e-6)) * 100
+                if total_demand > 0
+                else 0
+            )
+
             # Note: For solver statistics, we keep simplified approach since this is just for logging
             # The storage module will calculate proper totals from carrier statistics
             statistics["custom_statistics"] = {
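The weighting multiplications above convert per-snapshot MW into MWh by scaling each snapshot row with the number of hours it represents. A standalone numpy sketch with hypothetical values:

    import numpy as np

    p = np.array([[100.0, 50.0], [80.0, 40.0]])  # snapshots x generators, MW
    w = np.array([3.0, 3.0])                     # hours represented per snapshot
    total_mwh = float((p * w[:, None]).sum())    # (150 + 120) * 3 = 810.0 MWh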
@@ -1113,86 +1356,121 @@ class NetworkSolver:
                 "total_emissions_tons_co2": 0.0,  # Will be calculated properly in storage module
                 "average_price_per_mwh": avg_price,
                 "unmet_load_percentage": unmet_load_percentage,
-                "max_unmet_load_hour_mw": 0.0  # TODO: Calculate max hourly unmet load
+                "max_unmet_load_hour_mw": 0.0,  # TODO: Calculate max hourly unmet load
             }
-
+
             # Runtime info
             unmet_load_count = 0
-            if hasattr(network, '_component_type_map'):
-                unmet_load_count = len([name for name, comp_type in network._component_type_map.items()
-                                        if comp_type == 'UNMET_LOAD'])
-
+            if hasattr(network, "_component_type_map"):
+                unmet_load_count = len(
+                    [
+                        name
+                        for name, comp_type in network._component_type_map.items()
+                        if comp_type == "UNMET_LOAD"
+                    ]
+                )
+
             statistics["runtime_info"] = {
                 "solve_time_seconds": solve_time,
                 "component_count": (
-                    len(network.buses) + len(network.generators) + len(network.loads) +
-                    len(network.lines) + len(network.links)
-                ) if hasattr(network, 'buses') else 0,
-                "bus_count": len(network.buses) if hasattr(network, 'buses') else 0,
-                "generator_count": len(network.generators) if hasattr(network, 'generators') else 0,
+                    (
+                        len(network.buses)
+                        + len(network.generators)
+                        + len(network.loads)
+                        + len(network.lines)
+                        + len(network.links)
+                    )
+                    if hasattr(network, "buses")
+                    else 0
+                ),
+                "bus_count": len(network.buses) if hasattr(network, "buses") else 0,
+                "generator_count": (
+                    len(network.generators) if hasattr(network, "generators") else 0
+                ),
                 "unmet_load_count": unmet_load_count,
-                "load_count": len(network.loads) if hasattr(network, 'loads') else 0,
-                "line_count": len(network.lines) if hasattr(network, 'lines') else 0,
-                "snapshot_count": len(network.snapshots) if hasattr(network, 'snapshots') else 0
+                "load_count": len(network.loads) if hasattr(network, "loads") else 0,
+                "line_count": len(network.lines) if hasattr(network, "lines") else 0,
+                "snapshot_count": (
+                    len(network.snapshots) if hasattr(network, "snapshots") else 0
+                ),
             }
-
+
             # Solver info
             statistics["solver_info"] = {
                 "solver_name": solver_name,
-                "termination_condition": 'optimal' if hasattr(network, 'objective') else 'unknown',
-                "objective_value": float(network.objective) if hasattr(network, 'objective') else None
+                "termination_condition": (
+                    "optimal" if hasattr(network, "objective") else "unknown"
+                ),
+                "objective_value": (
+                    float(network.objective) if hasattr(network, "objective") else None
+                ),
             }
-
+
             return statistics
-
+
         except Exception as e:
-            logger.error(f"Failed to calculate comprehensive network statistics: {e}", exc_info=True)
+            logger.error(
+                f"Failed to calculate comprehensive network statistics: {e}",
+                exc_info=True,
+            )
             return {
                 "error": str(e),
                 "core_summary": {},
                 "pypsa_statistics": {},
                 "custom_statistics": {},
                 "runtime_info": {"solve_time_seconds": solve_time},
-                "solver_info": {"solver_name": solver_name}
+                "solver_info": {"solver_name": solver_name},
             }
-
-    def _calculate_statistics_by_year(self, network: 'pypsa.Network', solve_time: float, solver_name: str) -> Dict[int, Dict[str, Any]]:
+
+    def _calculate_statistics_by_year(
+        self, network: "pypsa.Network", solve_time: float, solver_name: str
+    ) -> Dict[int, Dict[str, Any]]:
         """Calculate statistics for each year in the network"""
         try:
             # Extract years from network snapshots or manually extracted years
-            if hasattr(network.snapshots, 'year'):
+            if hasattr(network.snapshots, "year"):
                 years = sorted(network.snapshots.year.unique())
-            elif hasattr(network, '_available_years'):
+            elif hasattr(network, "_available_years"):
                 years = network._available_years
-            elif hasattr(network.snapshots, 'levels'):
+            elif hasattr(network.snapshots, "levels"):
                 # Multi-period optimization - get years from period level
                 period_values = network.snapshots.get_level_values(0)
                 years = sorted(period_values.unique())
             else:
                 # If no year info, skip year-based calculations
-                logger.info("No year information found in network - skipping year-based statistics")
+                logger.info(
+                    "No year information found in network - skipping year-based statistics"
+                )
                 return {}
-
+
             logger.info(f"Calculating year-based statistics for years: {years}")
             year_statistics = {}
-
+
             for year in years:
                 try:
-                    year_stats = self._calculate_network_statistics_for_year(network, year, solve_time, solver_name)
+                    year_stats = self._calculate_network_statistics_for_year(
+                        network, year, solve_time, solver_name
+                    )
                     year_statistics[year] = year_stats
                     logger.info(f"Calculated statistics for year {year}")
                 except Exception as e:
                     logger.error(f"Failed to calculate statistics for year {year}: {e}")
                     continue
-
-            logger.info(f"Successfully calculated year-based statistics for {len(year_statistics)} years")
+
+            logger.info(
+                f"Successfully calculated year-based statistics for {len(year_statistics)} years"
+            )
             return year_statistics
-
+
         except Exception as e:
-            logger.error(f"Failed to calculate year-based statistics: {e}", exc_info=True)
+            logger.error(
+                f"Failed to calculate year-based statistics: {e}", exc_info=True
+            )
             return {}
-
-    def _calculate_network_statistics_for_year(self, network: 'pypsa.Network', year: int, solve_time: float, solver_name: str) -> Dict[str, Any]:
+
+    def _calculate_network_statistics_for_year(
+        self, network: "pypsa.Network", year: int, solve_time: float, solver_name: str
+    ) -> Dict[str, Any]:
         """Calculate network statistics for a specific year"""
         try:
             # Initialize statistics structure
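Note that the per-year loop isolates failures: a year that raises logs an error and `continue`s rather than aborting the remaining years. The returned mapping has roughly this shape (values hypothetical, keys mirroring the per-year method below):

    {
        2030: {"core_summary": {...}, "custom_statistics": {"year": 2030}},
        2040: {"core_summary": {...}, "custom_statistics": {"year": 2040}},
    }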
@@ -1200,181 +1478,208 @@ class NetworkSolver:
                 "core_summary": {},
                 "custom_statistics": {},
                 "runtime_info": {},
-                "solver_info": {}
+                "solver_info": {},
             }
-
+
             # Core summary statistics for this year
             total_generation = 0
             total_demand = 0
             unserved_energy = 0
-
+
             # Calculate generation statistics for this year
-            if hasattr(network, 'generators_t') and hasattr(network.generators_t, 'p'):
+            if hasattr(network, "generators_t") and hasattr(network.generators_t, "p"):
                 # Filter by year
-                year_generation = self._filter_timeseries_by_year(network.generators_t.p, network.snapshots, year)
+                year_generation = self._filter_timeseries_by_year(
+                    network.generators_t.p, network.snapshots, year
+                )
                 if year_generation is not None and not year_generation.empty:
                     # Apply snapshot weightings for this year
                     year_weightings = self._get_year_weightings(network, year)
                     if year_weightings is not None:
-                        total_generation = float((year_generation.values * year_weightings[:, None]).sum())
+                        total_generation = float(
+                            (year_generation.values * year_weightings[:, None]).sum()
+                        )
                     else:
                         total_generation = float(year_generation.sum().sum())
-
+
                 # Calculate unserved energy for this year
-                if hasattr(network, '_component_type_map'):
-                    unmet_load_gen_names = [name for name, comp_type in network._component_type_map.items()
-                                            if comp_type == 'UNMET_LOAD']
-
+                if hasattr(network, "_component_type_map"):
+                    unmet_load_gen_names = [
+                        name
+                        for name, comp_type in network._component_type_map.items()
+                        if comp_type == "UNMET_LOAD"
+                    ]
+
                     for gen_name in unmet_load_gen_names:
                         if gen_name in year_generation.columns:
                             if year_weightings is not None:
-                                gen_output = float((year_generation[gen_name] * year_weightings).sum())
+                                gen_output = float(
+                                    (
+                                        year_generation[gen_name] * year_weightings
+                                    ).sum()
+                                )
                             else:
                                 gen_output = float(year_generation[gen_name].sum())
                             unserved_energy += gen_output
-
+
             # Calculate demand statistics for this year
-            if hasattr(network, 'loads_t') and hasattr(network.loads_t, 'p'):
-                year_demand = self._filter_timeseries_by_year(network.loads_t.p, network.snapshots, year)
+            if hasattr(network, "loads_t") and hasattr(network.loads_t, "p"):
+                year_demand = self._filter_timeseries_by_year(
+                    network.loads_t.p, network.snapshots, year
+                )
                 if year_demand is not None and not year_demand.empty:
                     year_weightings = self._get_year_weightings(network, year)
                     if year_weightings is not None:
-                        total_demand = float((year_demand.values * year_weightings[:, None]).sum())
+                        total_demand = float(
+                            (year_demand.values * year_weightings[:, None]).sum()
+                        )
                     else:
                         total_demand = float(year_demand.sum().sum())
-
+
             statistics["core_summary"] = {
                 "total_generation_mwh": total_generation,
                 "total_demand_mwh": total_demand,
                 "total_cost": None,  # Year-specific cost calculation would be complex
-                "load_factor": (total_demand / (total_generation + 1e-6)) if total_generation > 0 else 0,
-                "unserved_energy_mwh": unserved_energy
+                "load_factor": (
+                    (total_demand / (total_generation + 1e-6))
+                    if total_generation > 0
+                    else 0
+                ),
+                "unserved_energy_mwh": unserved_energy,
             }
-
+
             # Custom statistics
-            unmet_load_percentage = (unserved_energy / (total_demand + 1e-6)) * 100 if total_demand > 0 else 0
-
+            unmet_load_percentage = (
+                (unserved_energy / (total_demand + 1e-6)) * 100
+                if total_demand > 0
+                else 0
+            )
+
             # Calculate year-specific carrier statistics
             year_carrier_stats = self._calculate_year_carrier_statistics(network, year)
-
+
             statistics["custom_statistics"] = {
                 "unmet_load_percentage": unmet_load_percentage,
                 "year": year,
-                **year_carrier_stats  # Include all carrier-specific statistics for this year
+                **year_carrier_stats,  # Include all carrier-specific statistics for this year
             }
-
+
             # Runtime info
             year_snapshot_count = self._count_year_snapshots(network.snapshots, year)
-
+
             statistics["runtime_info"] = {
                 "solve_time_seconds": solve_time,
                 "year": year,
-                "snapshot_count": year_snapshot_count
+                "snapshot_count": year_snapshot_count,
             }
-
+
             # Solver info
-            statistics["solver_info"] = {
-                "solver_name": solver_name,
-                "year": year
-            }
-
+            statistics["solver_info"] = {"solver_name": solver_name, "year": year}
+
             return statistics
-
+
         except Exception as e:
-            logger.error(f"Failed to calculate network statistics for year {year}: {e}", exc_info=True)
+            logger.error(
+                f"Failed to calculate network statistics for year {year}: {e}",
+                exc_info=True,
+            )
             return {
                 "error": str(e),
                 "core_summary": {},
                 "custom_statistics": {"year": year},
                 "runtime_info": {"solve_time_seconds": solve_time, "year": year},
-                "solver_info": {"solver_name": solver_name, "year": year}
+                "solver_info": {"solver_name": solver_name, "year": year},
             }
-
-    def _filter_timeseries_by_year(self, timeseries_df: 'pd.DataFrame', snapshots: 'pd.Index', year: int) -> 'pd.DataFrame':
+
+    def _filter_timeseries_by_year(
+        self, timeseries_df: "pd.DataFrame", snapshots: "pd.Index", year: int
+    ) -> "pd.DataFrame":
         """Filter timeseries data by year"""
         try:
             # Handle MultiIndex case (multi-period optimization)
-            if hasattr(snapshots, 'levels'):
+            if hasattr(snapshots, "levels"):
                 period_values = snapshots.get_level_values(0)
                 year_mask = period_values == year
                 if year_mask.any():
                     year_snapshots = snapshots[year_mask]
                     return timeseries_df.loc[year_snapshots]
-
+
             # Handle DatetimeIndex case (regular time series)
-            elif hasattr(snapshots, 'year'):
+            elif hasattr(snapshots, "year"):
                 year_mask = snapshots.year == year
                 if year_mask.any():
                     return timeseries_df.loc[year_mask]
-
+
             # Fallback - return None if can't filter
             return None
-
+
         except Exception as e:
             logger.error(f"Failed to filter timeseries by year {year}: {e}")
             return None
-
-    def _get_year_weightings(self, network: 'pypsa.Network', year: int) -> 'np.ndarray':
+
+    def _get_year_weightings(self, network: "pypsa.Network", year: int) -> "np.ndarray":
         """Get snapshot weightings for a specific year"""
         try:
             # Filter snapshot weightings by year
-            if hasattr(network.snapshots, 'levels'):
+            if hasattr(network.snapshots, "levels"):
                 period_values = network.snapshots.get_level_values(0)
                 year_mask = period_values == year
                 if year_mask.any():
                     year_snapshots = network.snapshots[year_mask]
                     year_weightings = network.snapshot_weightings.loc[year_snapshots]
                     if isinstance(year_weightings, pd.DataFrame):
-                        if 'objective' in year_weightings.columns:
-                            return year_weightings['objective'].values
+                        if "objective" in year_weightings.columns:
+                            return year_weightings["objective"].values
                         else:
                             return year_weightings.iloc[:, 0].values
                     else:
                         return year_weightings.values
-
-            elif hasattr(network.snapshots, 'year'):
+
+            elif hasattr(network.snapshots, "year"):
                 year_mask = network.snapshots.year == year
                 if year_mask.any():
                     year_weightings = network.snapshot_weightings.loc[year_mask]
                     if isinstance(year_weightings, pd.DataFrame):
-                        if 'objective' in year_weightings.columns:
-                            return year_weightings['objective'].values
+                        if "objective" in year_weightings.columns:
+                            return year_weightings["objective"].values
                         else:
                             return year_weightings.iloc[:, 0].values
                     else:
                         return year_weightings.values
-
+
             return None
-
+
         except Exception as e:
             logger.error(f"Failed to get year weightings for year {year}: {e}")
             return None
-
-    def _count_year_snapshots(self, snapshots: 'pd.Index', year: int) -> int:
+
+    def _count_year_snapshots(self, snapshots: "pd.Index", year: int) -> int:
         """Count snapshots for a specific year"""
         try:
             # Handle MultiIndex case
-            if hasattr(snapshots, 'levels'):
+            if hasattr(snapshots, "levels"):
                 period_values = snapshots.get_level_values(0)
                 year_mask = period_values == year
                 return year_mask.sum()
-
+
             # Handle DatetimeIndex case
-            elif hasattr(snapshots, 'year'):
+            elif hasattr(snapshots, "year"):
                 year_mask = snapshots.year == year
                 return year_mask.sum()
-
+
             # Fallback
             return 0
-
+
         except Exception as e:
             logger.error(f"Failed to count snapshots for year {year}: {e}")
             return 0
-
-    def _calculate_year_carrier_statistics(self, network: 'pypsa.Network', year: int) -> Dict[str, Any]:
+
+    def _calculate_year_carrier_statistics(
+        self, network: "pypsa.Network", year: int
+    ) -> Dict[str, Any]:
         """Calculate carrier-specific statistics for a specific year"""
         # Note: This is a simplified implementation that doesn't have database access
-        # The proper implementation should be done in the storage module where we have conn
+        # The proper implementation should be done in the storage module where we have conn
         # For now, return empty dictionaries - the storage module will handle this properly
         return {
             "dispatch_by_carrier": {},
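The MultiIndex branch of `_filter_timeseries_by_year` selects the rows whose first index level (the investment period) matches the requested year. A pandas sketch with a hypothetical index:

    import pandas as pd

    snapshots = pd.MultiIndex.from_tuples(
        [(2030, 1), (2030, 2), (2040, 1), (2040, 2)],
        names=["period", "timestep"],
    )
    mask = snapshots.get_level_values(0) == 2030
    year_snapshots = snapshots[mask]  # just the two 2030 entries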
@@ -1382,35 +1687,35 @@ class NetworkSolver:
             "emissions_by_carrier": {},
             "capital_cost_by_carrier": {},
             "operational_cost_by_carrier": {},
-            "total_system_cost_by_carrier": {}
+            "total_system_cost_by_carrier": {},
         }
-
+
     def _get_generator_carrier_name(self, generator_name: str) -> Optional[str]:
         """Get carrier name for a generator - simplified implementation"""
         # This is a simplified approach - in practice, this should query the database
         # or use the component type mapping from the network
-
+
         # Try to extract carrier from generator name patterns
         gen_lower = generator_name.lower()
-
-        if 'coal' in gen_lower:
-            return 'coal'
-        elif 'gas' in gen_lower or 'ccgt' in gen_lower or 'ocgt' in gen_lower:
-            return 'gas'
-        elif 'nuclear' in gen_lower:
-            return 'nuclear'
-        elif 'solar' in gen_lower or 'pv' in gen_lower:
-            return 'solar'
-        elif 'wind' in gen_lower:
-            return 'wind'
-        elif 'hydro' in gen_lower:
-            return 'hydro'
-        elif 'biomass' in gen_lower:
-            return 'biomass'
-        elif 'battery' in gen_lower:
-            return 'battery'
-        elif 'unmet' in gen_lower:
-            return 'Unmet Load'
+
+        if "coal" in gen_lower:
+            return "coal"
+        elif "gas" in gen_lower or "ccgt" in gen_lower or "ocgt" in gen_lower:
+            return "gas"
+        elif "nuclear" in gen_lower:
+            return "nuclear"
+        elif "solar" in gen_lower or "pv" in gen_lower:
+            return "solar"
+        elif "wind" in gen_lower:
+            return "wind"
+        elif "hydro" in gen_lower:
+            return "hydro"
+        elif "biomass" in gen_lower:
+            return "biomass"
+        elif "battery" in gen_lower:
+            return "battery"
+        elif "unmet" in gen_lower:
+            return "Unmet Load"
         else:
             # Default to generator name if no pattern matches
             return generator_name
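The if/elif chain above is first-match and order-sensitive. A table-driven equivalent - an editorial sketch, not code from the package - keeps the same semantics while making the pattern list easier to extend:

    # Hypothetical refactor sketch; names are illustrative, not part of pyconvexity.
    CARRIER_PATTERNS = [
        (("coal",), "coal"),
        (("gas", "ccgt", "ocgt"), "gas"),
        (("nuclear",), "nuclear"),
        (("solar", "pv"), "solar"),
        (("wind",), "wind"),
        (("hydro",), "hydro"),
        (("biomass",), "biomass"),
        (("battery",), "battery"),
        (("unmet",), "Unmet Load"),
    ]

    def carrier_for(name: str) -> str:
        low = name.lower()
        for keys, carrier in CARRIER_PATTERNS:
            if any(k in low for k in keys):
                return carrier
        return name  # same fallback as the method above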