pyconvexity 0.4.2.post1__py3-none-any.whl → 0.4.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pyconvexity might be problematic.
- pyconvexity/_version.py +1 -1
- pyconvexity/data/schema/03_validation_data.sql +2 -1
- pyconvexity/models/network.py +93 -2
- pyconvexity/solvers/pypsa/api.py +15 -10
- pyconvexity/solvers/pypsa/builder.py +16 -12
- pyconvexity/solvers/pypsa/solver.py +19 -231
- pyconvexity/solvers/pypsa/storage.py +22 -254
- pyconvexity-0.4.6.dist-info/METADATA +148 -0
- {pyconvexity-0.4.2.post1.dist-info → pyconvexity-0.4.6.dist-info}/RECORD +11 -11
- pyconvexity-0.4.2.post1.dist-info/METADATA +0 -47
- {pyconvexity-0.4.2.post1.dist-info → pyconvexity-0.4.6.dist-info}/WHEEL +0 -0
- {pyconvexity-0.4.2.post1.dist-info → pyconvexity-0.4.6.dist-info}/top_level.txt +0 -0
@@ -21,7 +21,15 @@ class NetworkSolver:
     This ensures consistent behavior for both single-year and multi-year models.
     """
 
-    def __init__(self):
+    def __init__(self, verbose: bool = False):
+        """
+        Initialize NetworkSolver.
+
+        Args:
+            verbose: Enable detailed logging output
+        """
+        self.verbose = verbose
+
         # Import PyPSA with error handling
         try:
             import pypsa
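
The `verbose` flag introduced here gates most of the per-solve logging that the rest of this release removes. A minimal usage sketch, assuming the import path implied by the wheel's file layout (pyconvexity/solvers/pypsa/solver.py); the exact public path is not confirmed by this diff:

    # Hypothetical usage of the new verbose flag (import path assumed).
    from pyconvexity.solvers.pypsa.solver import NetworkSolver

    solver = NetworkSolver()                     # default: verbose=False, quiet solves
    debug_solver = NetworkSolver(verbose=True)   # opt back in to detailed logging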
@@ -78,7 +86,6 @@ class NetworkSolver:
 
         # Get default solver from user settings
         default_solver = user_settings.get("default_solver", "highs")
-        logger.info(f"📖 Read default solver from user settings: {default_solver}")
 
         # Validate that it's a known solver
        known_solvers = [
@@ -152,8 +159,6 @@ class NetworkSolver:
         start_time = time.time()
         run_id = str(uuid.uuid4())
 
-        logger.info(f"Starting network solve with {solver_name}")
-
         try:
             # Get solver configuration
             actual_solver_name, solver_config = self._get_solver_config(
@@ -165,11 +170,10 @@ class NetworkSolver:
             effective_discount_rate = (
                 discount_rate if discount_rate is not None else 0.0
             )
-            logger.info(f"Discount rate for solve: {effective_discount_rate}")
 
             years = list(network.investment_periods)
 
-            logger.info(f"
+            logger.info(f"Solving with {actual_solver_name}: {len(years)} periods {years}, discount rate {effective_discount_rate}")
 
             # Calculate investment period weightings with discount rate
             self._calculate_investment_weightings(network, effective_discount_rate)
@@ -190,102 +194,28 @@ class NetworkSolver:
                 )
             )
             if optimization_constraints:
-                logger.info(
-                    f"Found {len(optimization_constraints)} optimization constraints"
-                )
-
                 # Filter for model constraints only (network constraints already applied)
                 for constraint in optimization_constraints:
                     constraint_code = constraint.get("constraint_code", "")
                     constraint_type = self._detect_constraint_type(constraint_code)
-                    constraint_name = constraint.get("name", "unknown")
 
                     if constraint_type == "model_constraint":
                         model_constraints.append(constraint)
-                        logger.info(
-                            f"Will apply model constraint during solve: {constraint_name}"
-                        )
-                    else:
-                        logger.info(
-                            f"Skipping network constraint (already applied): {constraint_name}"
-                        )
 
-            logger.info(
-                f"Will apply {len(model_constraints)} model constraints during optimization"
-            )
-
-            # Create extra_functionality for model constraints only
             if model_constraints:
                 extra_functionality = self._create_extra_functionality(
                     model_constraints, constraint_applicator
                 )
-
-
-
+            if self.verbose:
+                logger.info(
+                    f"Prepared {len(model_constraints)} model constraints for optimization"
+                )
 
             # NOTE: Model constraints are applied DURING solve via extra_functionality
             # Network constraints were already applied to the network structure before solve
 
-
-
-            logger.info(f"Solver: {actual_solver_name}")
-            logger.info(f"Investment periods: {years}")
-            logger.info(f"Snapshots: {len(network.snapshots)} (MultiIndex)")
-            if solver_config:
-                logger.info(f"Solver options: {solver_config}")
-            logger.info(f"=== END PYPSA SOLVER DIAGNOSTICS ===")
-
-            # Always solve with multi-period optimization
-            logger.info(
-                f"Solving network with multi-period optimization using {actual_solver_name}"
-            )
-
-            # DEBUG: Check network structure before solving
-            logger.info(f"DEBUG: Network snapshots type: {type(network.snapshots)}")
-            logger.info(
-                f"DEBUG: Network snapshots names: {getattr(network.snapshots, 'names', 'No names')}"
-            )
-            logger.info(f"DEBUG: Network snapshots shape: {len(network.snapshots)}")
-            logger.info(f"DEBUG: First 3 snapshots: {network.snapshots[:3].tolist()}")
-
-            # Check some timeseries data structure
-            if hasattr(network, "generators_t") and hasattr(
-                network.generators_t, "p_max_pu"
-            ):
-                if not network.generators_t.p_max_pu.empty:
-                    logger.info(
-                        f"DEBUG: generators_t.p_max_pu type: {type(network.generators_t.p_max_pu)}"
-                    )
-                    logger.info(
-                        f"DEBUG: generators_t.p_max_pu index type: {type(network.generators_t.p_max_pu.index)}"
-                    )
-                    logger.info(
-                        f"DEBUG: generators_t.p_max_pu index names: {getattr(network.generators_t.p_max_pu.index, 'names', 'No names')}"
-                    )
-                    logger.info(
-                        f"DEBUG: generators_t.p_max_pu shape: {network.generators_t.p_max_pu.shape}"
-                    )
-                    logger.info(
-                        f"DEBUG: First 3 p_max_pu index values: {network.generators_t.p_max_pu.index[:3].tolist()}"
-                    )
-
-            if hasattr(network, "loads_t") and hasattr(network.loads_t, "p_set"):
-                if not network.loads_t.p_set.empty:
-                    logger.info(
-                        f"DEBUG: loads_t.p_set type: {type(network.loads_t.p_set)}"
-                    )
-                    logger.info(
-                        f"DEBUG: loads_t.p_set index type: {type(network.loads_t.p_set.index)}"
-                    )
-                    logger.info(
-                        f"DEBUG: loads_t.p_set index names: {getattr(network.loads_t.p_set.index, 'names', 'No names')}"
-                    )
-                    logger.info(
-                        f"DEBUG: loads_t.p_set shape: {network.loads_t.p_set.shape}"
-                    )
-                    logger.info(
-                        f"DEBUG: First 3 p_set index values: {network.loads_t.p_set.index[:3].tolist()}"
-                    )
+            if self.verbose:
+                logger.info(f"Snapshots: {len(network.snapshots)}, Solver options: {solver_config}")
 
             if solver_config:
                 result = network.optimize(
@@ -303,32 +233,6 @@ class NetworkSolver:
 
             solve_time = time.time() - start_time
 
-            # Post-solve debug logging (matches old code)
-            objective_value = getattr(network, "objective", None)
-            if objective_value is not None:
-                logger.info(f"[DEBUG] POST-SOLVE snapshot_weightings structure:")
-                if hasattr(network, "snapshot_weightings"):
-                    logger.info(f"[DEBUG] Type: {type(network.snapshot_weightings)}")
-                    logger.info(
-                        f"[DEBUG] Columns: {list(network.snapshot_weightings.columns)}"
-                    )
-                    logger.info(f"[DEBUG] Shape: {network.snapshot_weightings.shape}")
-                    logger.info(
-                        f"[DEBUG] Unique values in objective column: {network.snapshot_weightings['objective'].unique()}"
-                    )
-                    logger.info(
-                        f"[DEBUG] Sum of objective column: {network.snapshot_weightings['objective'].sum()}"
-                    )
-
-                if hasattr(network, "investment_period_weightings"):
-                    logger.info(f"[DEBUG] investment_period_weightings exists:")
-                    logger.info(
-                        f"[DEBUG] Type: {type(network.investment_period_weightings)}"
-                    )
-                    logger.info(
-                        f"[DEBUG] Content:\n{network.investment_period_weightings}"
-                    )
-
             # Extract solve results with comprehensive statistics
             solve_result = self._extract_solve_results(
                 network, result, solve_time, actual_solver_name, run_id
@@ -336,31 +240,22 @@ class NetworkSolver:
 
             # Calculate comprehensive network statistics (all years combined)
             if solve_result.get("success"):
-                logger.info("Calculating comprehensive network statistics...")
                 network_statistics = self._calculate_comprehensive_network_statistics(
                     network, solve_time, actual_solver_name
                 )
                 solve_result["network_statistics"] = network_statistics
 
                 # Calculate year-based statistics for capacity expansion analysis
-                logger.info("Calculating year-based statistics...")
                 year_statistics = self._calculate_statistics_by_year(
                     network, solve_time, actual_solver_name
                 )
                 solve_result["year_statistics"] = year_statistics
                 solve_result["year_statistics_available"] = len(year_statistics) > 0
 
+            objective_value = getattr(network, "objective", None)
             logger.info(
-                f"Solve completed in {solve_time:.2f}
-            )
-            logger.info(f"PyPSA result object: {result}")
-            logger.info(
-                f"PyPSA result status: {getattr(result, 'status', 'no status attr')}"
-            )
-            logger.info(
-                f"Network objective: {getattr(network, 'objective', 'no objective')}"
+                f"Solve completed in {solve_time:.2f}s - status: {solve_result['status']}, objective: {objective_value}"
             )
-            logger.info(f"Solve result success: {solve_result.get('success')}")
 
             return solve_result
 
@@ -426,16 +321,12 @@ class NetworkSolver:
                 {"solver_options": custom_options} if custom_options else None
             )
 
-            logger.info(
-                f"Using custom solver configuration: {actual_solver} with options: {custom_options}"
-            )
             return actual_solver, merged_options
 
         # Handle "default" solver
         if solver_name == "default":
             # Try to read user's default solver preference
             actual_solver = self._resolve_default_solver()
-            logger.info(f"Resolved 'default' solver to: {actual_solver}")
             return actual_solver, solver_options
 
         # Handle special Gurobi configurations
@@ -501,7 +392,6 @@ class NetworkSolver:
             }
             if solver_options:
                 gurobi_options_balanced.update(solver_options)
-            logger.info(f"Using Gurobi Barrier+Dual Crossover Balanced configuration")
             return "gurobi", gurobi_options_balanced
 
         elif solver_name == "gurobi (dual simplex)":
@@ -527,9 +417,6 @@ class NetworkSolver:
             }
             if solver_options:
                 mosek_default_options["solver_options"].update(solver_options)
-            logger.info(
-                f"Using Mosek with default configuration (auto-select optimizer) and moderate MIP strategies"
-            )
             return "mosek", mosek_default_options
 
         elif solver_name == "mosek (barrier)":
@@ -549,9 +436,6 @@ class NetworkSolver:
             }
             if solver_options:
                 mosek_barrier_options["solver_options"].update(solver_options)
-            logger.info(
-                f"Using Mosek Barrier with aggressive presolve and relaxed tolerances"
-            )
             return "mosek", mosek_barrier_options
 
         elif solver_name == "mosek (barrier+crossover)":
@@ -569,9 +453,6 @@ class NetworkSolver:
             }
             if solver_options:
                 mosek_barrier_crossover_options["solver_options"].update(solver_options)
-            logger.info(
-                f"Using Mosek Barrier+Crossover configuration with Gurobi-matched tolerances and moderate MIP strategies"
-            )
             return "mosek", mosek_barrier_crossover_options
 
         elif solver_name == "mosek (dual simplex)":
@@ -586,9 +467,6 @@ class NetworkSolver:
             }
             if solver_options:
                 mosek_dual_options["solver_options"].update(solver_options)
-            logger.info(
-                f"Using Mosek Dual Simplex configuration with Gurobi-matched tolerances and moderate MIP strategies"
-            )
             return "mosek", mosek_dual_options
 
         # Check if this is a known valid solver name
@@ -603,9 +481,6 @@ class NetworkSolver:
             }
             if solver_options:
                 mosek_defaults["solver_options"].update(solver_options)
-            logger.info(
-                f"Using Mosek with barrier method for MIP (interior-point for root/nodes)"
-            )
             return solver_name, mosek_defaults
 
         elif solver_name == "gurobi":
@@ -620,7 +495,6 @@ class NetworkSolver:
             }
             if solver_options:
                 gurobi_defaults["solver_options"].update(solver_options)
-            logger.info(f"Using Gurobi with default MILP-friendly settings")
             return solver_name, gurobi_defaults
 
         # Handle special COPT configurations
@@ -644,9 +518,6 @@ class NetworkSolver:
             }
             if solver_options:
                 copt_barrier_options["solver_options"].update(solver_options)
-            logger.info(
-                f"Using COPT Barrier configuration (fast interior-point method)"
-            )
             return "copt", copt_barrier_options
 
         elif solver_name == "copt (barrier homogeneous)":
@@ -672,7 +543,6 @@ class NetworkSolver:
                 copt_barrier_homogeneous_options["solver_options"].update(
                     solver_options
                 )
-            logger.info(f"Using COPT Barrier Homogeneous configuration")
             return "copt", copt_barrier_homogeneous_options
 
         elif solver_name == "copt (barrier+crossover)":
@@ -691,9 +561,6 @@ class NetworkSolver:
             }
             if solver_options:
                 copt_barrier_crossover_options["solver_options"].update(solver_options)
-            logger.info(
-                f"Using COPT Barrier+Crossover configuration (balanced performance)"
-            )
             return "copt", copt_barrier_crossover_options
 
         elif solver_name == "copt (dual simplex)":
@@ -715,7 +582,6 @@ class NetworkSolver:
             }
             if solver_options:
                 copt_dual_simplex_options["solver_options"].update(solver_options)
-            logger.info(f"Using COPT Dual Simplex configuration (robust method)")
             return "copt", copt_dual_simplex_options
 
         elif solver_name == "copt (concurrent)":
@@ -737,9 +603,6 @@ class NetworkSolver:
             }
             if solver_options:
                 copt_concurrent_options["solver_options"].update(solver_options)
-            logger.info(
-                f"Using COPT Concurrent configuration (parallel simplex + barrier)"
-            )
             return "copt", copt_concurrent_options
 
         elif solver_name in ["highs", "cplex", "glpk", "cbc", "scip", "copt"]:
@@ -816,10 +679,6 @@ class NetworkSolver:
         def extra_functionality(network, snapshots):
             """Apply optimization constraints during solve - matches old code structure"""
             try:
-                logger.info(
-                    f"Applying {len(optimization_constraints)} optimization constraints during solve"
-                )
-
                 # Apply each constraint in priority order
                 sorted_constraints = sorted(
                     optimization_constraints, key=lambda x: x.get("priority", 0)
@@ -836,8 +695,6 @@ class NetworkSolver:
                         )
                         continue
 
-                logger.info("Optimization constraints applied successfully")
-
             except Exception as e:
                 logger.error(f"Failed to apply optimization constraints: {e}")
                 # Don't re-raise - let optimization continue
@@ -853,10 +710,6 @@ class NetworkSolver:
 
             time_periods = get_network_time_periods(conn)
             if time_periods and len(network.snapshots) > 0:
-                logger.info(
-                    f"Setting snapshot weightings AFTER multi-period setup for {len(time_periods)} time periods"
-                )
-
                 # Get network info to determine time interval
                 network_info = get_network_info(conn)
                 time_interval = network_info.get("time_interval", "1H")
@@ -868,10 +721,6 @@ class NetworkSolver:
                         f"Could not parse time interval '{time_interval}', using default weight of 1.0"
                     )
 
-                logger.info(
-                    f"Parsed time interval '{time_interval}' -> weight = {weight}"
-                )
-
                 # Create weightings array - all snapshots get the same weight for this time resolution
                 weightings = [weight] * len(time_periods)
 
@@ -880,21 +729,6 @@ class NetworkSolver:
                     network.snapshot_weightings.loc[:, "objective"] = weightings
                     network.snapshot_weightings.loc[:, "generators"] = weightings
                     network.snapshot_weightings.loc[:, "stores"] = weightings
-                    logger.info(
-                        f"Set snapshot weightings AFTER multi-period setup: objective, generators, stores columns"
-                    )
-
-                    # Debug logging like old code
-                    logger.info(
-                        f"Snapshot weightings shape: {network.snapshot_weightings.shape}"
-                    )
-                    logger.info(
-                        f"Unique values in objective column: {network.snapshot_weightings['objective'].unique()}"
-                    )
-                    logger.info(
-                        f"Sum of objective column: {network.snapshot_weightings['objective'].sum()}"
-                    )
-                    logger.info(f"Weight per snapshot: {weight} hours")
                 else:
                     logger.warning(
                         f"Mismatch between weightings ({len(weightings)}) and snapshots ({len(network.snapshots)})"
@@ -903,7 +737,6 @@ class NetworkSolver:
             logger.warning(
                 f"Failed to set snapshot weightings after multi-period setup: {e}"
            )
-            logger.exception("Full traceback:")
 
     def _parse_time_interval(self, time_interval: str) -> Optional[float]:
         """Parse time interval string to hours - handles multiple formats."""
@@ -973,10 +806,6 @@ class NetworkSolver:
             # Convert pandas Index to list for easier handling
             years_list = years.tolist() if hasattr(years, "tolist") else list(years)
 
-            logger.info(
-                f"Calculating investment weightings for periods: {years_list} with discount rate: {discount_rate}"
-            )
-
             # For single year, use simple weighting of 1.0
             if len(years_list) == 1:
                 # Single year case
@@ -986,9 +815,6 @@ class NetworkSolver:
                         "years": pd.Series({years_list[0]: 1}),
                     }
                 )
-                logger.info(
-                    f"Set single-year investment period weightings for year {years_list[0]}"
-                )
             else:
                 # Multi-year case - EXACTLY match old code logic
                 # Get unique years from the network snapshots to determine period lengths
@@ -1003,8 +829,6 @@ class NetworkSolver:
                     # Fallback: use investment periods as years
                     snapshot_years = years_list
 
-                logger.info(f"Snapshot years found: {snapshot_years}")
-
                 # Calculate years per period - EXACTLY matching old code
                 years_diff = []
                 for i, year in enumerate(years_list):
@@ -1028,7 +852,6 @@ class NetworkSolver:
                         period_years = 1
 
                     years_diff.append(period_years)
-                    logger.info(f"Period {year}: {period_years} years")
 
                 # Create weightings DataFrame with years column
                 weightings_df = pd.DataFrame(
@@ -1039,22 +862,14 @@ class NetworkSolver:
                 r = discount_rate
                 T = 0  # Cumulative time tracker
 
-                logger.info(f"Calculating discount factors with rate {r}:")
                 for period, nyears in weightings_df.years.items():
                     # Calculate discount factors for each year in this period
                     discounts = [(1 / (1 + r) ** t) for t in range(T, T + nyears)]
                     period_weighting = sum(discounts)
                     weightings_df.at[period, "objective"] = period_weighting
-
-                    logger.info(
-                        f"  Period {period}: years {T} to {T + nyears - 1}, discounts={[f'{d:.4f}' for d in discounts]}, sum={period_weighting:.4f}"
-                    )
                     T += nyears  # Update cumulative time
 
                 network.investment_period_weightings = weightings_df
-                logger.info(f"Final investment period weightings:")
-                logger.info(f"  Years: {weightings_df['years'].to_dict()}")
-                logger.info(f"  Objective: {weightings_df['objective'].to_dict()}")
 
         except Exception as e:
             logger.error(f"Failed to calculate investment weightings: {e}")
@@ -1086,15 +901,6 @@ class NetworkSolver:
         status = getattr(result, "status", "unknown")
         objective_value = getattr(network, "objective", None)
 
-        # Debug logging
-        logger.info(
-            f"Raw PyPSA result attributes: {dir(result) if result else 'None'}"
-        )
-        if hasattr(result, "termination_condition"):
-            logger.info(f"Termination condition: {result.termination_condition}")
-        if hasattr(result, "solver"):
-            logger.info(f"Solver info: {result.solver}")
-
         # Convert PyPSA result to dictionary format
         result_dict = self._convert_pypsa_result_to_dict(result)
 
@@ -1155,25 +961,18 @@ class NetworkSolver:
         try:
             # Check explicit status first
             if status in ["optimal", "feasible"]:
-                logger.info(f"Success determined by status: {status}")
                 return True
 
             # Check termination condition
             if hasattr(result, "termination_condition"):
                 term_condition = str(result.termination_condition).lower()
                 if "optimal" in term_condition:
-                    logger.info(
-                        f"Success determined by termination condition: {result.termination_condition}"
-                    )
                     return True
 
             # Check if we have a valid objective value
             if objective_value is not None and not (
                 objective_value == 0 and status == "unknown"
             ):
-                logger.info(
-                    f"Success determined by valid objective value: {objective_value}"
-                )
                 return True
 
             # Check solver-specific success indicators
@@ -1182,13 +981,10 @@ class NetworkSolver:
                 if hasattr(solver_info, "termination_condition"):
                     term_condition = str(solver_info.termination_condition).lower()
                     if "optimal" in term_condition:
-                        logger.info(
-                            f"Success determined by solver termination condition: {solver_info.termination_condition}"
-                        )
                         return True
 
             logger.warning(
-                f"Could not determine success: status={status}, objective={objective_value}
+                f"Could not determine solve success: status={status}, objective={objective_value}"
             )
             return False
 
@@ -1438,12 +1234,8 @@ class NetworkSolver:
                 years = sorted(period_values.unique())
             else:
                 # If no year info, skip year-based calculations
-                logger.info(
-                    "No year information found in network - skipping year-based statistics"
-                )
                 return {}
 
-            logger.info(f"Calculating year-based statistics for years: {years}")
             year_statistics = {}
 
             for year in years:
@@ -1452,14 +1244,10 @@ class NetworkSolver:
                         network, year, solve_time, solver_name
                     )
                     year_statistics[year] = year_stats
-                    logger.info(f"Calculated statistics for year {year}")
                 except Exception as e:
                     logger.error(f"Failed to calculate statistics for year {year}: {e}")
                     continue
 
-            logger.info(
-                f"Successfully calculated year-based statistics for {len(year_statistics)} years"
-            )
             return year_statistics
 
         except Exception as e:
|