pyconvexity 0.5.0__py3-none-any.whl → 0.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of pyconvexity might be problematic.
- pyconvexity/__init__.py +9 -0
- pyconvexity/_version.py +1 -1
- pyconvexity/solvers/pypsa/solver.py +11 -373
- pyconvexity/transformations/__init__.py +15 -0
- pyconvexity/transformations/api.py +93 -0
- pyconvexity/transformations/time_axis.py +721 -0
- {pyconvexity-0.5.0.dist-info → pyconvexity-0.5.1.dist-info}/METADATA +1 -1
- {pyconvexity-0.5.0.dist-info → pyconvexity-0.5.1.dist-info}/RECORD +10 -7
- {pyconvexity-0.5.0.dist-info → pyconvexity-0.5.1.dist-info}/WHEEL +0 -0
- {pyconvexity-0.5.0.dist-info → pyconvexity-0.5.1.dist-info}/top_level.txt +0 -0
pyconvexity/__init__.py
CHANGED
@@ -239,3 +239,12 @@ try:
 except ImportError:
     # NetCDF dependencies not available
     pass
+
+# Transformation operations
+try:
+    from pyconvexity.transformations import modify_time_axis
+
+    __all__.append("modify_time_axis")
+except ImportError:
+    # Transformation dependencies (pandas, numpy) not available
+    pass
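The new guard mirrors the existing NetCDF pattern: `modify_time_axis` only lands in `__all__` when the transformation dependencies import cleanly. A minimal sketch of how a caller might feature-detect the optional API; the paths and fallback message are illustrative, not part of pyconvexity:

# Sketch only: feature-detect the optional transformations API added above.
import pyconvexity

if "modify_time_axis" in getattr(pyconvexity, "__all__", []):
    result = pyconvexity.modify_time_axis(
        source_db_path="full_year_model.db",   # hypothetical paths
        target_db_path="one_week_model.db",
        new_start="2024-01-01 00:00:00",
        new_end="2024-01-07 23:00:00",
        new_resolution_minutes=60,
    )
else:
    print("transformations unavailable (pandas/numpy not installed)")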
pyconvexity/_version.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.5.0"
+__version__ = "0.5.1"
pyconvexity/solvers/pypsa/solver.py
CHANGED
@@ -38,81 +38,6 @@ class NetworkSolver:
                 "Please ensure it is installed correctly in the environment."
             ) from e
 
-    def _get_user_settings_path(self):
-        """Get the path to the user settings file (same location as Tauri uses)"""
-        try:
-            import platform
-            import os
-            from pathlib import Path
-
-            system = platform.system()
-            if system == "Darwin":  # macOS
-                home = Path.home()
-                app_data_dir = (
-                    home / "Library" / "Application Support" / "com.convexity.desktop"
-                )
-            elif system == "Windows":
-                app_data_dir = (
-                    Path(os.environ.get("APPDATA", "")) / "com.convexity.desktop"
-                )
-            else:  # Linux
-                home = Path.home()
-                app_data_dir = home / ".local" / "share" / "com.convexity.desktop"
-
-            settings_file = app_data_dir / "user_settings.json"
-            return settings_file if settings_file.exists() else None
-
-        except Exception as e:
-            return None
-
-    def _resolve_default_solver(self) -> str:
-        """Resolve 'default' solver to user's preferred solver"""
-        try:
-            import json
-
-            settings_path = self._get_user_settings_path()
-            if not settings_path:
-                return "highs"
-
-            with open(settings_path, "r") as f:
-                user_settings = json.load(f)
-
-            # Get default solver from user settings
-            default_solver = user_settings.get("default_solver", "highs")
-
-            # Validate that it's a known solver
-            known_solvers = [
-                "highs",
-                "gurobi",
-                "gurobi (barrier)",
-                "gurobi (barrier homogeneous)",
-                "gurobi (barrier+crossover balanced)",
-                "gurobi (dual simplex)",
-                "mosek",
-                "mosek (default)",
-                "mosek (barrier)",
-                "mosek (barrier+crossover)",
-                "mosek (dual simplex)",
-                "copt",
-                "copt (barrier)",
-                "copt (barrier homogeneous)",
-                "copt (barrier+crossover)",
-                "copt (dual simplex)",
-                "copt (concurrent)",
-                "cplex",
-                "glpk",
-                "cbc",
-                "scip",
-            ]
-
-            if default_solver in known_solvers:
-                return default_solver
-            else:
-                return "highs"
-
-        except Exception as e:
-            return "highs"
-
     def solve_network(
         self,
         network: "pypsa.Network",
@@ -269,24 +194,19 @@
         custom_solver_config: Optional[Dict[str, Any]] = None,
     ) -> tuple[str, Optional[Dict[str, Any]]]:
         """
-        Get the actual solver name and options for
+        Get the actual solver name and options for solver configurations.
 
         Args:
-            solver_name: The solver name (e.g., '
+            solver_name: The solver name (e.g., 'highs', 'gurobi', 'custom')
             solver_options: Optional additional solver options
-            custom_solver_config:
+            custom_solver_config: Custom solver configuration (from frontend)
                 Format: {"solver": "actual_solver_name", "solver_options": {...}}
 
         Returns:
             Tuple of (actual_solver_name, solver_options_dict)
         """
-        # Handle "custom" solver with custom configuration
-        if solver_name == "custom":
-            if not custom_solver_config:
-                raise ValueError(
-                    "custom_solver_config must be provided when solver_name='custom'"
-                )
-
+        # Handle "custom" solver with custom configuration from frontend
+        if solver_name == "custom" and custom_solver_config:
             if "solver" not in custom_solver_config:
                 raise ValueError(
                     "custom_solver_config must contain 'solver' key with the actual solver name"
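With the relaxed check, a "custom" request is only expanded when a configuration is actually supplied; its expected shape follows the `Format:` line in the docstring above. An illustrative config (solver and option values are examples, not defaults shipped with pyconvexity):

# Illustrative custom_solver_config, following the documented
# {"solver": "actual_solver_name", "solver_options": {...}} format.
custom_solver_config = {
    "solver": "gurobi",
    "solver_options": {"MIPGap": 0.05, "Threads": 0},
}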
@@ -307,294 +227,12 @@
 
         return actual_solver, merged_options
 
-        #
-
-
-
-
-
-        # Handle special Gurobi configurations
-        if solver_name == "gurobi (barrier)":
-            gurobi_barrier_options = {
-                "solver_options": {
-                    "Method": 2,  # Barrier
-                    "Crossover": 0,  # Skip crossover
-                    "MIPGap": 0.05,  # 5% gap
-                    "Threads": 0,  # Use all cores (0 = auto)
-                    "Presolve": 2,  # Aggressive presolve
-                    "ConcurrentMIP": 1,  # Parallel root strategies
-                    "BarConvTol": 1e-4,  # Relaxed barrier convergence
-                    "FeasibilityTol": 1e-5,
-                    "OptimalityTol": 1e-5,
-                    "NumericFocus": 1,  # Improve stability
-                    "PreSparsify": 1,
-                }
-            }
-            # Merge with any additional options
-            if solver_options:
-                gurobi_barrier_options.update(solver_options)
-            return "gurobi", gurobi_barrier_options
-
-        elif solver_name == "gurobi (barrier homogeneous)":
-            gurobi_barrier_homogeneous_options = {
-                "solver_options": {
-                    "Method": 2,  # Barrier
-                    "Crossover": 0,  # Skip crossover
-                    "MIPGap": 0.05,
-                    "Threads": 0,  # Use all cores (0 = auto)
-                    "Presolve": 2,
-                    "ConcurrentMIP": 1,
-                    "BarConvTol": 1e-4,
-                    "FeasibilityTol": 1e-5,
-                    "OptimalityTol": 1e-5,
-                    "NumericFocus": 1,
-                    "PreSparsify": 1,
-                    "BarHomogeneous": 1,  # Enable homogeneous barrier algorithm
-                }
-            }
-            if solver_options:
-                gurobi_barrier_homogeneous_options.update(solver_options)
-            return "gurobi", gurobi_barrier_homogeneous_options
-
-        elif solver_name == "gurobi (barrier+crossover balanced)":
-            gurobi_options_balanced = {
-                "solver_options": {
-                    "Method": 2,
-                    "Crossover": 1,  # Dual crossover
-                    "MIPGap": 0.01,
-                    "Threads": 0,  # Use all cores (0 = auto)
-                    "Presolve": 2,
-                    "Heuristics": 0.1,
-                    "Cuts": 2,
-                    "ConcurrentMIP": 1,
-                    "BarConvTol": 1e-6,
-                    "FeasibilityTol": 1e-6,
-                    "OptimalityTol": 1e-6,
-                    "NumericFocus": 1,
-                    "PreSparsify": 1,
-                }
-            }
-            if solver_options:
-                gurobi_options_balanced.update(solver_options)
-            return "gurobi", gurobi_options_balanced
-
-        elif solver_name == "gurobi (dual simplex)":
-            gurobi_dual_options = {
-                "solver_options": {
-                    "Method": 1,  # Dual simplex method
-                    "Threads": 0,  # Use all available cores
-                    "Presolve": 2,  # Aggressive presolve
-                }
-            }
-            if solver_options:
-                gurobi_dual_options.update(solver_options)
-            return "gurobi", gurobi_dual_options
-
-        # Handle special Mosek configurations
-        elif solver_name == "mosek (default)":
-            # No custom options - let Mosek use its default configuration
-            mosek_default_options = {
-                "solver_options": {
-                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # MIP relative gap tolerance (5% to match Gurobi)
-                    "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hours
-                }
-            }
-            if solver_options:
-                mosek_default_options["solver_options"].update(solver_options)
-            return "mosek", mosek_default_options
-
-        elif solver_name == "mosek (barrier)":
-            mosek_barrier_options = {
-                "solver_options": {
-                    "MSK_IPAR_INTPNT_BASIS": 0,  # Skip crossover (barrier-only) - 0 = MSK_BI_NEVER
-                    "MSK_DPAR_INTPNT_TOL_REL_GAP": 1e-4,  # Match Gurobi barrier tolerance
-                    "MSK_DPAR_INTPNT_TOL_PFEAS": 1e-5,  # Match Gurobi primal feasibility
-                    "MSK_DPAR_INTPNT_TOL_DFEAS": 1e-5,  # Match Gurobi dual feasibility
-                    # Removed MSK_DPAR_INTPNT_TOL_INFEAS - was 1000x tighter than other tolerances!
-                    "MSK_IPAR_NUM_THREADS": 0,  # Use all available cores (0 = auto)
-                    "MSK_IPAR_PRESOLVE_USE": 2,  # Aggressive presolve (match Gurobi Presolve=2)
-                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap
-                    "MSK_IPAR_MIO_ROOT_OPTIMIZER": 4,  # Use interior-point for MIP root
-                    "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hour
-                }
-            }
-            if solver_options:
-                mosek_barrier_options["solver_options"].update(solver_options)
-            return "mosek", mosek_barrier_options
-
-        elif solver_name == "mosek (barrier+crossover)":
-            mosek_barrier_crossover_options = {
-                "solver_options": {
-                    "MSK_IPAR_INTPNT_BASIS": 1,  # Always crossover (1 = MSK_BI_ALWAYS)
-                    "MSK_DPAR_INTPNT_TOL_REL_GAP": 1e-4,  # Match Gurobi barrier tolerance (was 1e-6)
-                    "MSK_DPAR_INTPNT_TOL_PFEAS": 1e-5,  # Match Gurobi (was 1e-6)
-                    "MSK_DPAR_INTPNT_TOL_DFEAS": 1e-5,  # Match Gurobi (was 1e-6)
-                    "MSK_IPAR_NUM_THREADS": 0,  # Use all available cores (0 = auto)
-                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap (was 1e-6)
-                    "MSK_IPAR_MIO_ROOT_OPTIMIZER": 4,  # Use interior-point for MIP root
-                    "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hour (safety limit)
-                }
-            }
-            if solver_options:
-                mosek_barrier_crossover_options["solver_options"].update(solver_options)
-            return "mosek", mosek_barrier_crossover_options
-
-        elif solver_name == "mosek (dual simplex)":
-            mosek_dual_options = {
-                "solver_options": {
-                    "MSK_IPAR_NUM_THREADS": 0,  # Use all available cores (0 = automatic)
-                    "MSK_IPAR_PRESOLVE_USE": 1,  # Force presolve
-                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap (was 1e-6)
-                    "MSK_IPAR_MIO_ROOT_OPTIMIZER": 1,  # Use dual simplex for MIP root
-                    "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hour (safety limit)
-                }
-            }
-            if solver_options:
-                mosek_dual_options["solver_options"].update(solver_options)
-            return "mosek", mosek_dual_options
-
-        # Check if this is a known valid solver name
-        elif solver_name == "mosek":
-            # Add default MILP-friendly settings for plain Mosek
-            mosek_defaults = {
-                "solver_options": {
-                    "MSK_DPAR_MIO_REL_GAP_CONST": 0.05,  # Match Gurobi 5% MIP gap (was 1e-4)
-                    "MSK_DPAR_MIO_MAX_TIME": 36000,  # Max time 10 hours
-                    "MSK_IPAR_NUM_THREADS": 0,  # Use all cores (0 = auto)
-                }
-            }
-            if solver_options:
-                mosek_defaults["solver_options"].update(solver_options)
-            return solver_name, mosek_defaults
-
-        elif solver_name == "gurobi":
-            # Add default MILP-friendly settings for plain Gurobi (for consistency)
-            gurobi_defaults = {
-                "solver_options": {
-                    "MIPGap": 1e-4,  # 0.01% gap
-                    "TimeLimit": 3600,  # 1 hour
-                    "Threads": 0,  # Use all cores
-                    "OutputFlag": 1,  # Enable output
-                }
-            }
-            if solver_options:
-                gurobi_defaults["solver_options"].update(solver_options)
-            return solver_name, gurobi_defaults
-
-        # Handle special COPT configurations
-        elif solver_name == "copt (barrier)":
-            copt_barrier_options = {
-                "solver_options": {
-                    "LpMethod": 2,  # Barrier method
-                    "Crossover": 0,  # Skip crossover for speed
-                    "RelGap": 0.05,  # 5% MIP gap (match Gurobi)
-                    "TimeLimit": 7200,  # 1 hour time limit
-                    "Threads": -1,  # 4 threads (memory-conscious)
-                    "Presolve": 3,  # Aggressive presolve
-                    "Scaling": 1,  # Enable scaling
-                    "FeasTol": 1e-5,  # Match Gurobi feasibility
-                    "DualTol": 1e-5,  # Match Gurobi dual tolerance
-                    # MIP performance settings
-                    "CutLevel": 2,  # Normal cut generation
-                    "HeurLevel": 3,  # Aggressive heuristics
-                    "StrongBranching": 1,  # Fast strong branching
-                }
-            }
-            if solver_options:
-                copt_barrier_options["solver_options"].update(solver_options)
-            return "copt", copt_barrier_options
-
-        elif solver_name == "copt (barrier homogeneous)":
-            copt_barrier_homogeneous_options = {
-                "solver_options": {
-                    "LpMethod": 2,  # Barrier method
-                    "Crossover": 0,  # Skip crossover
-                    "BarHomogeneous": 1,  # Use homogeneous self-dual form
-                    "RelGap": 0.05,  # 5% MIP gap
-                    "TimeLimit": 3600,  # 1 hour
-                    "Threads": -1,  # 4 threads (memory-conscious)
-                    "Presolve": 3,  # Aggressive presolve
-                    "Scaling": 1,  # Enable scaling
-                    "FeasTol": 1e-5,
-                    "DualTol": 1e-5,
-                    # MIP performance settings
-                    "CutLevel": 2,  # Normal cuts
-                    "HeurLevel": 3,  # Aggressive heuristics
-                    "StrongBranching": 1,  # Fast strong branching
-                }
-            }
-            if solver_options:
-                copt_barrier_homogeneous_options["solver_options"].update(
-                    solver_options
-                )
-            return "copt", copt_barrier_homogeneous_options
-
-        elif solver_name == "copt (barrier+crossover)":
-            copt_barrier_crossover_options = {
-                "solver_options": {
-                    "LpMethod": 2,  # Barrier method
-                    "Crossover": 1,  # Enable crossover for better solutions
-                    "RelGap": 0.05,  # 5% MIP gap (relaxed for faster solves)
-                    "TimeLimit": 36000,  # 10 hour
-                    "Threads": -1,  # Use all cores
-                    "Presolve": 2,  # Aggressive presolve
-                    "Scaling": 1,  # Enable scaling
-                    "FeasTol": 1e-4,  # Tighter feasibility
-                    "DualTol": 1e-4,  # Tighter dual tolerance
-                }
-            }
-            if solver_options:
-                copt_barrier_crossover_options["solver_options"].update(solver_options)
-            return "copt", copt_barrier_crossover_options
-
-        elif solver_name == "copt (dual simplex)":
-            copt_dual_simplex_options = {
-                "solver_options": {
-                    "LpMethod": 1,  # Dual simplex method
-                    "RelGap": 0.05,  # 5% MIP gap
-                    "TimeLimit": 3600,  # 1 hour
-                    "Threads": -1,  # Use all cores
-                    "Presolve": 3,  # Aggressive presolve
-                    "Scaling": 1,  # Enable scaling
-                    "FeasTol": 1e-6,
-                    "DualTol": 1e-6,
-                    # MIP performance settings
-                    "CutLevel": 2,  # Normal cuts
-                    "HeurLevel": 2,  # Normal heuristics
-                    "StrongBranching": 1,  # Fast strong branching
-                }
-            }
-            if solver_options:
-                copt_dual_simplex_options["solver_options"].update(solver_options)
-            return "copt", copt_dual_simplex_options
-
-        elif solver_name == "copt (concurrent)":
-            copt_concurrent_options = {
-                "solver_options": {
-                    "LpMethod": 4,  # Concurrent (simplex + barrier)
-                    "RelGap": 0.05,  # 5% MIP gap
-                    "TimeLimit": 3600,  # 1 hour
-                    "Threads": -1,  # Use all cores
-                    "Presolve": 3,  # Aggressive presolve
-                    "Scaling": 1,  # Enable scaling
-                    "FeasTol": 1e-5,
-                    "DualTol": 1e-5,
-                    # MIP performance settings
-                    "CutLevel": 2,  # Normal cuts
-                    "HeurLevel": 3,  # Aggressive heuristics
-                    "StrongBranching": 1,  # Fast strong branching
-                }
-            }
-            if solver_options:
-                copt_concurrent_options["solver_options"].update(solver_options)
-            return "copt", copt_concurrent_options
-
-        elif solver_name in ["highs", "cplex", "glpk", "cbc", "scip", "copt"]:
-            return solver_name, solver_options
-
-        else:
-            # Unknown solver name - fall back to highs
-            return "highs", solver_options
+        # For all other cases, pass through solver name and options directly
+        # The frontend is responsible for resolving presets and defaults
+        if solver_options:
+            return solver_name, {"solver_options": solver_options}
+
+        return solver_name, None
 
     def _detect_constraint_type(self, constraint_code: str) -> str:
         """
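The removed preset blocks (the Gurobi, Mosek, and COPT variants) are replaced by the straight pass-through shown in the added lines, with preset resolution moved to the frontend. A rough sketch of the resulting behavior; this stand-alone function is illustrative, not the package's API:

from typing import Any, Dict, Optional, Tuple

def resolve_solver(
    solver_name: str, solver_options: Optional[Dict[str, Any]] = None
) -> Tuple[str, Optional[Dict[str, Any]]]:
    """Illustrative stand-in for the new pass-through branch."""
    if solver_options:
        return solver_name, {"solver_options": solver_options}
    return solver_name, None

# resolve_solver("highs")                      -> ("highs", None)
# resolve_solver("gurobi", {"MIPGap": 0.05})   -> ("gurobi", {"solver_options": {"MIPGap": 0.05}})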
pyconvexity/transformations/__init__.py
ADDED
@@ -0,0 +1,15 @@
"""
Transformations module for PyConvexity.

Provides functions for transforming network data, including:
- Time axis modification (truncation, resampling)
- Future: network merging, scenario duplication, etc.
"""

from pyconvexity.transformations.api import modify_time_axis
from pyconvexity.transformations.time_axis import TimeAxisModifier

__all__ = [
    "modify_time_axis",
    "TimeAxisModifier",
]
pyconvexity/transformations/api.py
ADDED
@@ -0,0 +1,93 @@
"""
High-level API for network transformations.

Provides user-friendly functions for transforming network data.
"""

from typing import Any, Callable, Dict, Optional

from pyconvexity.transformations.time_axis import TimeAxisModifier


def modify_time_axis(
    source_db_path: str,
    target_db_path: str,
    new_start: str,
    new_end: str,
    new_resolution_minutes: int,
    new_network_name: Optional[str] = None,
    convert_timeseries: bool = True,
    progress_callback: Optional[Callable[[float, str], None]] = None,
) -> Dict[str, Any]:
    """
    Create a new database with modified time axis and resampled timeseries data.

    This function copies a network database while adjusting the time axis -
    useful for truncating time periods, changing resolution, or both.
    All timeseries data is automatically resampled to match the new time axis.

    Args:
        source_db_path: Path to source database
        target_db_path: Path to target database (will be created)
        new_start: Start datetime as ISO string (e.g., "2024-01-01 00:00:00")
        new_end: End datetime as ISO string (e.g., "2024-12-31 23:00:00")
        new_resolution_minutes: New time resolution in minutes (e.g., 60 for hourly)
        new_network_name: Optional new name for the network
        convert_timeseries: If True, resample timeseries data to new time axis.
            If False, wipe all timeseries attributes (useful for creating templates)
        progress_callback: Optional callback for progress updates.
            Called with (progress: float, message: str) where progress is 0-100.

    Returns:
        Dictionary with results and statistics:
        - success: bool - Whether the operation completed successfully
        - source_db_path: str - Path to source database
        - target_db_path: str - Path to created target database
        - new_periods_count: int - Number of time periods in new database
        - new_resolution_minutes: int - Resolution in minutes
        - new_start: str - Start time
        - new_end: str - End time
        - processing_stats: dict - Detailed processing statistics

    Raises:
        ValueError: If time parameters are invalid (end before start, negative resolution)
        FileNotFoundError: If source database doesn't exist

    Example:
        # Truncate a yearly model to one week with hourly resolution
        result = modify_time_axis(
            source_db_path="full_year_model.db",
            target_db_path="one_week_model.db",
            new_start="2024-01-01 00:00:00",
            new_end="2024-01-07 23:00:00",
            new_resolution_minutes=60,
            new_network_name="One Week Test Model",
        )

        if result["success"]:
            print(f"Created {result['target_db_path']} with {result['new_periods_count']} periods")

    Example with progress tracking:
        def on_progress(progress: float, message: str):
            print(f"[{progress:.0f}%] {message}")

        result = modify_time_axis(
            source_db_path="original.db",
            target_db_path="resampled.db",
            new_start="2024-01-01",
            new_end="2024-06-30",
            new_resolution_minutes=60,
            progress_callback=on_progress,
        )
    """
    modifier = TimeAxisModifier()
    return modifier.modify_time_axis(
        source_db_path=source_db_path,
        target_db_path=target_db_path,
        new_start=new_start,
        new_end=new_end,
        new_resolution_minutes=new_resolution_minutes,
        new_network_name=new_network_name,
        convert_timeseries=convert_timeseries,
        progress_callback=progress_callback,
    )
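Beyond the two docstring examples, the `convert_timeseries=False` path is worth illustrating: it keeps the new time axis but strips all timeseries attributes, which the docstring notes is useful for creating templates. A hedged example; the paths and network name are placeholders:

# Build a timeseries-free template on an hourly 2025 axis (illustrative paths).
from pyconvexity.transformations import modify_time_axis

result = modify_time_axis(
    source_db_path="full_year_model.db",
    target_db_path="empty_template.db",
    new_start="2025-01-01 00:00:00",
    new_end="2025-12-31 23:00:00",
    new_resolution_minutes=60,
    new_network_name="Empty Template",
    convert_timeseries=False,  # wipe timeseries instead of resampling
)
print(result["processing_stats"].get("total_attributes_wiped"))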
pyconvexity/transformations/time_axis.py
ADDED
@@ -0,0 +1,721 @@
"""
Time axis modification for PyConvexity networks.

Handles truncation and resampling of network time periods and all associated timeseries data.
"""

import logging
import shutil
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple

import numpy as np
import pandas as pd

from pyconvexity.core.database import database_context
from pyconvexity.core.types import TimePeriod
from pyconvexity.models.network import get_network_info, get_network_time_periods
from pyconvexity.timeseries import get_timeseries, set_timeseries

logger = logging.getLogger(__name__)


class TimeAxisModifier:
    """
    Service for modifying network time axis and resampling all timeseries data.

    This class handles the complete workflow of:
    1. Copying a network database
    2. Modifying the time axis (start, end, resolution)
    3. Resampling all timeseries data to match the new time axis

    Example:
        modifier = TimeAxisModifier()
        result = modifier.modify_time_axis(
            source_db_path="original.db",
            target_db_path="resampled.db",
            new_start="2024-01-01 00:00:00",
            new_end="2024-01-07 23:00:00",
            new_resolution_minutes=60,
        )
    """

    def __init__(self):
        logger.debug("TimeAxisModifier initialized")

    def _minutes_to_freq_str(self, minutes: int) -> str:
        """Convert minutes to a pandas frequency string, preferring aliases like 'D' or 'h'."""
        if minutes == 1440:
            return "D"
        if minutes % (60 * 24) == 0:
            days = minutes // (60 * 24)
            return f"{days}D"
        if minutes == 60:
            return "h"
        if minutes % 60 == 0:
            hours = minutes // 60
            return f"{hours}h"
        return f"{minutes}min"

    def modify_time_axis(
        self,
        source_db_path: str,
        target_db_path: str,
        new_start: str,
        new_end: str,
        new_resolution_minutes: int,
        new_network_name: Optional[str] = None,
        convert_timeseries: bool = True,
        progress_callback: Optional[Callable[[float, str], None]] = None,
    ) -> Dict[str, Any]:
        """
        Create a new database with modified time axis and resampled timeseries data.

        This method creates a copy of the source database and modifies its time axis,
        optionally resampling all timeseries data to match the new time periods.

        Args:
            source_db_path: Path to source database
            target_db_path: Path to target database (will be created)
            new_start: Start datetime as ISO string (e.g., "2024-01-01 00:00:00")
            new_end: End datetime as ISO string (e.g., "2024-12-31 23:00:00")
            new_resolution_minutes: New time resolution in minutes (e.g., 60 for hourly)
            new_network_name: Optional new name for the network
            convert_timeseries: If True, resample timeseries data. If False, wipe all timeseries
            progress_callback: Optional callback for progress updates (progress: float, message: str)

        Returns:
            Dictionary with results and statistics:
            - success: bool
            - source_db_path: str
            - target_db_path: str
            - new_periods_count: int
            - new_resolution_minutes: int
            - new_start: str
            - new_end: str
            - processing_stats: dict with detailed statistics

        Raises:
            ValueError: If time parameters are invalid
            FileNotFoundError: If source database doesn't exist
        """

        def update_progress(progress: float, message: str):
            if progress_callback:
                progress_callback(progress, message)
            logger.info(f"[{progress:.1f}%] {message}")

        try:
            update_progress(0, "Starting time axis modification...")

            # Validate inputs
            start_dt = pd.Timestamp(new_start)
            end_dt = pd.Timestamp(new_end)

            if end_dt <= start_dt:
                raise ValueError("End time must be after start time")

            if new_resolution_minutes <= 0:
                raise ValueError("Time resolution must be positive")

            update_progress(5, "Validating source database...")

            # Validate source database and get network info
            with database_context(source_db_path, read_only=True) as source_conn:
                network_info = get_network_info(source_conn)
                if not network_info:
                    raise ValueError("No network metadata found in source database")

            # Generate new time periods
            update_progress(10, "Generating new time axis...")
            new_time_periods = self._generate_time_periods(
                start_dt, end_dt, new_resolution_minutes
            )
            update_progress(15, f"Generated {len(new_time_periods)} new time periods")

            # Copy database schema and static data
            update_progress(20, "Creating target database...")
            self._copy_database_structure(
                source_db_path,
                target_db_path,
                new_time_periods,
                new_resolution_minutes,
                new_network_name,
                update_progress,
            )

            # Process timeseries data based on convert_timeseries flag
            if convert_timeseries:
                update_progress(40, "Processing timeseries data...")
                stats = self._process_all_timeseries(
                    source_db_path,
                    target_db_path,
                    new_time_periods,
                    new_resolution_minutes,
                    update_progress,
                )
            else:
                update_progress(40, "Wiping timeseries data...")
                stats = self._wipe_all_timeseries(target_db_path, update_progress)

            update_progress(95, "Finalizing database...")

            # Validate target database
            with database_context(target_db_path, read_only=True) as target_conn:
                target_network_info = get_network_info(target_conn)
                if not target_network_info:
                    raise ValueError("Failed to create target network")

            update_progress(100, "Time axis modification completed successfully")

            return {
                "success": True,
                "source_db_path": source_db_path,
                "target_db_path": target_db_path,
                "new_periods_count": len(new_time_periods),
                "original_resolution_minutes": None,  # Could be calculated from source
                "new_resolution_minutes": new_resolution_minutes,
                "new_start": new_start,
                "new_end": new_end,
                "processing_stats": stats,
            }

        except Exception as e:
            logger.error(f"Time axis modification failed: {e}", exc_info=True)

            # Clean up partial target file
            try:
                target_path = Path(target_db_path)
                if target_path.exists():
                    target_path.unlink()
                    logger.info(f"Cleaned up partial target database: {target_db_path}")
            except Exception as cleanup_error:
                logger.warning(
                    f"Failed to clean up partial target database: {cleanup_error}"
                )

            raise

    def _generate_time_periods(
        self, start: pd.Timestamp, end: pd.Timestamp, resolution_minutes: int
    ) -> List[TimePeriod]:
        """Generate new time periods based on start, end, and resolution."""

        # Create time range
        freq_str = self._minutes_to_freq_str(resolution_minutes)
        timestamps = pd.date_range(
            start=start, end=end, freq=freq_str, inclusive="both"
        )

        periods = []
        for i, timestamp in enumerate(timestamps):
            # Convert to Unix timestamp (seconds)
            unix_timestamp = int(timestamp.timestamp())

            # Create formatted time string (UTC to avoid DST issues)
            formatted_time = timestamp.strftime("%Y-%m-%d %H:%M:%S")

            periods.append(
                TimePeriod(
                    timestamp=unix_timestamp,
                    period_index=i,
                    formatted_time=formatted_time,
                )
            )

        logger.info(
            f"Generated {len(periods)} time periods from {start} to {end} "
            f"at {resolution_minutes}min resolution"
        )
        return periods

    def _copy_database_structure(
        self,
        source_path: str,
        target_path: str,
        new_periods: List[TimePeriod],
        new_resolution_minutes: int,
        new_network_name: Optional[str],
        progress_callback: Callable[[float, str], None],
    ):
        """Copy database schema and static data, update time periods."""

        # Copy entire database file as starting point
        progress_callback(25, "Copying database file...")
        shutil.copy2(source_path, target_path)

        # Connect to target database and update time periods
        progress_callback(30, "Updating time periods...")

        with database_context(target_path) as target_conn:
            # Clear existing time periods (single row for entire database)
            target_conn.execute("DELETE FROM network_time_periods")

            # Insert new optimized time periods metadata
            if new_periods:
                period_count = len(new_periods)
                start_timestamp = new_periods[0].timestamp

                # Calculate interval in seconds
                if len(new_periods) > 1:
                    interval_seconds = (
                        new_periods[1].timestamp - new_periods[0].timestamp
                    )
                else:
                    interval_seconds = new_resolution_minutes * 60

                target_conn.execute(
                    """
                    INSERT INTO network_time_periods (period_count, start_timestamp, interval_seconds)
                    VALUES (?, ?, ?)
                    """,
                    (period_count, start_timestamp, interval_seconds),
                )

            # Update network metadata with new time range and resolution
            start_time = new_periods[0].formatted_time if new_periods else None
            end_time = new_periods[-1].formatted_time if new_periods else None

            # Convert resolution to ISO 8601 duration format
            if new_resolution_minutes < 60:
                time_interval = f"PT{new_resolution_minutes}M"
            elif new_resolution_minutes % 60 == 0:
                hours = new_resolution_minutes // 60
                time_interval = f"PT{hours}H"
            else:
                time_interval = f"PT{new_resolution_minutes}M"

            # Update network metadata including name if provided
            if new_network_name:
                target_conn.execute(
                    """
                    UPDATE network_metadata
                    SET name = ?, time_start = ?, time_end = ?, time_interval = ?, updated_at = datetime('now')
                    """,
                    (new_network_name, start_time, end_time, time_interval),
                )
            else:
                target_conn.execute(
                    """
                    UPDATE network_metadata
                    SET time_start = ?, time_end = ?, time_interval = ?, updated_at = datetime('now')
                    """,
                    (start_time, end_time, time_interval),
                )

            # Clear data that becomes invalid with new time axis
            progress_callback(32, "Clearing time-dependent data...")

            # Clear solve results (they're tied to specific time periods)
            target_conn.execute("DELETE FROM network_solve_results")

            # Clear year-based solve results (also tied to specific time periods)
            target_conn.execute("DELETE FROM network_solve_results_by_year")

            # Clear any cached data in network_data_store that might be time-dependent
            target_conn.execute(
                """
                DELETE FROM network_data_store
                WHERE category IN ('results', 'statistics', 'cache')
                """
            )

            target_conn.commit()
            progress_callback(35, f"Updated time periods: {len(new_periods)} periods")

    def _process_all_timeseries(
        self,
        source_path: str,
        target_path: str,
        new_periods: List[TimePeriod],
        new_resolution_minutes: int,
        progress_callback: Callable[[float, str], None],
    ) -> Dict[str, Any]:
        """Process all timeseries attributes across all scenarios."""

        stats = {
            "total_components_processed": 0,
            "total_attributes_processed": 0,
            "total_scenarios_processed": 0,
            "attributes_by_component_type": {},
            "errors": [],
        }

        try:
            # Find all components with timeseries data
            components_with_timeseries = self._find_components_with_timeseries(
                source_path
            )

            total_items = len(components_with_timeseries)
            progress_callback(
                45, f"Found {total_items} timeseries attributes to process"
            )

            if total_items == 0:
                progress_callback(90, "No timeseries data found to process")
                return stats

            # Group by scenario for batch processing efficiency
            by_scenario: Dict[Optional[int], List[Tuple[int, str]]] = {}
            for comp_id, attr_name, scenario_id in components_with_timeseries:
                if scenario_id not in by_scenario:
                    by_scenario[scenario_id] = []
                by_scenario[scenario_id].append((comp_id, attr_name))

            stats["total_scenarios_processed"] = len(by_scenario)
            logger.info(
                f"Processing timeseries across {len(by_scenario)} scenarios: "
                f"{list(by_scenario.keys())}"
            )

            # Process each scenario
            processed = 0
            for scenario_id, items in by_scenario.items():
                scenario_name = f"scenario_{scenario_id}" if scenario_id else "base"
                progress_callback(
                    45 + (processed * 40 / total_items),
                    f"Processing scenario {scenario_name} ({len(items)} attributes)",
                )

                for comp_id, attr_name in items:
                    try:
                        # Get component type for statistics
                        comp_type = self._get_component_type(source_path, comp_id)
                        if comp_type not in stats["attributes_by_component_type"]:
                            stats["attributes_by_component_type"][comp_type] = 0

                        # Load original timeseries using pyconvexity API
                        original_timeseries = get_timeseries(
                            source_path, comp_id, attr_name, scenario_id
                        )

                        if not original_timeseries or not original_timeseries.values:
                            logger.warning(
                                f"No timeseries data found for component {comp_id}, "
                                f"attribute {attr_name}"
                            )
                            continue

                        # Get original time periods to understand the time mapping
                        with database_context(
                            source_path, read_only=True
                        ) as source_conn:
                            original_periods = get_network_time_periods(source_conn)

                        # Resample to new time axis with proper time-based slicing
                        resampled_values = self._resample_timeseries_with_time_mapping(
                            original_timeseries.values,
                            original_periods,
                            new_periods,
                            new_resolution_minutes,
                        )

                        if resampled_values:
                            # Save to target database using pyconvexity API
                            set_timeseries(
                                target_path,
                                comp_id,
                                attr_name,
                                resampled_values,
                                scenario_id,
                            )
                            stats["attributes_by_component_type"][comp_type] += 1
                            stats["total_attributes_processed"] += 1

                        processed += 1

                        if processed % 10 == 0:  # Update progress every 10 items
                            progress = 45 + (processed * 40 / total_items)
                            progress_callback(
                                progress,
                                f"Processed {processed}/{total_items} attributes",
                            )

                    except Exception as e:
                        error_msg = (
                            f"Failed to process component {comp_id}, "
                            f"attribute {attr_name}: {str(e)}"
                        )
                        logger.error(error_msg)
                        stats["errors"].append(error_msg)
                        continue

            # Count unique components processed
            unique_components = set()
            for comp_id, _, _ in components_with_timeseries:
                unique_components.add(comp_id)
            stats["total_components_processed"] = len(unique_components)

            progress_callback(
                87,
                f"Completed processing {stats['total_attributes_processed']} "
                "timeseries attributes",
            )

            # VACUUM the database to reclaim space from replaced timeseries data
            progress_callback(88, "Reclaiming database space...")
            with database_context(target_path) as conn:
                conn.execute("VACUUM")
            progress_callback(
                90,
                f"Database space reclaimed. Processed "
                f"{stats['total_attributes_processed']} timeseries attributes.",
            )

        except Exception as e:
            logger.error(f"Error processing timeseries data: {e}", exc_info=True)
            stats["errors"].append(f"General processing error: {str(e)}")
            raise

        return stats

    def _wipe_all_timeseries(
        self, target_db_path: str, progress_callback: Callable[[float, str], None]
    ) -> Dict[str, Any]:
        """Wipes all timeseries attributes."""

        with database_context(target_db_path) as target_conn:
            try:
                # Count timeseries attributes before deletion for statistics
                cursor = target_conn.execute(
                    """
                    SELECT COUNT(*) FROM component_attributes
                    WHERE storage_type = 'timeseries'
                    """
                )
                count_before = cursor.fetchone()[0]

                # Clear all timeseries attributes
                target_conn.execute(
                    """
                    DELETE FROM component_attributes
                    WHERE storage_type = 'timeseries'
                    """
                )

                target_conn.commit()
                progress_callback(
                    85, f"Wiped {count_before} timeseries attributes from network."
                )

                # VACUUM the database to reclaim space and reduce file size
                progress_callback(87, "Reclaiming database space...")
                target_conn.execute("VACUUM")
                progress_callback(
                    90,
                    f"Database space reclaimed. Wiped {count_before} timeseries attributes.",
                )

                return {
                    "total_attributes_wiped": count_before,
                    "total_components_processed": 0,
                    "total_attributes_processed": 0,
                    "total_scenarios_processed": 0,
                    "attributes_by_component_type": {},
                    "errors": [],
                }
            except Exception as e:
                logger.error(
                    f"Failed to wipe timeseries attributes: {e}", exc_info=True
                )
                return {
                    "total_attributes_wiped": 0,
                    "total_components_processed": 0,
                    "total_attributes_processed": 0,
                    "total_scenarios_processed": 0,
                    "attributes_by_component_type": {},
                    "errors": [f"Failed to wipe timeseries attributes: {str(e)}"],
                }

    def _find_components_with_timeseries(
        self, db_path: str
    ) -> List[Tuple[int, str, Optional[int]]]:
        """Find all components that have timeseries attributes."""

        with database_context(db_path, read_only=True) as conn:
            cursor = conn.execute(
                """
                SELECT DISTINCT component_id, attribute_name, scenario_id
                FROM component_attributes
                WHERE storage_type = 'timeseries'
                AND timeseries_data IS NOT NULL
                ORDER BY component_id, attribute_name, scenario_id
                """
            )

            results = cursor.fetchall()
            logger.info(f"Found {len(results)} timeseries attributes in database")

            return results

    def _get_component_type(self, db_path: str, component_id: int) -> str:
        """Get component type for statistics tracking."""
        with database_context(db_path, read_only=True) as conn:
            cursor = conn.execute(
                "SELECT component_type FROM components WHERE id = ?", (component_id,)
            )
            row = cursor.fetchone()
            return row[0] if row else "UNKNOWN"

    def _resample_timeseries_with_time_mapping(
        self,
        original_values: List[float],
        original_periods: List[TimePeriod],
        new_periods: List[TimePeriod],
        new_resolution_minutes: int,
    ) -> List[float]:
        """
        Resample timeseries data to new time axis with proper time-based slicing.

        This method:
        1. First slices the original data to match the new time range
        2. Then resamples the sliced data to the new resolution

        Args:
            original_values: Original timeseries values
            original_periods: Original time periods from source database
            new_periods: New time periods for target database
            new_resolution_minutes: New time resolution in minutes

        Returns:
            Resampled values list, or empty list if resampling fails
        """

        if not original_values or not new_periods or not original_periods:
            return []

        try:
            # Get time bounds for the new time axis
            new_start_timestamp = new_periods[0].timestamp
            new_end_timestamp = new_periods[-1].timestamp

            logger.debug(
                f"Original data: {len(original_values)} points, "
                f"{len(original_periods)} periods"
            )
            logger.debug(
                f"New time range: {new_periods[0].formatted_time} to "
                f"{new_periods[-1].formatted_time}"
            )

            # Find the slice of original data that falls within the new time range
            start_idx = 0
            end_idx = len(original_periods)

            # Find start index - first period >= new_start_timestamp
            for i, period in enumerate(original_periods):
                if period.timestamp >= new_start_timestamp:
                    start_idx = i
                    break

            # Find end index - last period <= new_end_timestamp
            for i in range(len(original_periods) - 1, -1, -1):
                if original_periods[i].timestamp <= new_end_timestamp:
                    end_idx = i + 1  # +1 because slice end is exclusive
                    break

            # Slice the original data to the new time range
            if start_idx >= len(original_values):
                logger.warning("Start index beyond original data range")
                return []

            end_idx = min(end_idx, len(original_values))
            sliced_values = original_values[start_idx:end_idx]
            sliced_periods = original_periods[start_idx:end_idx]

            logger.debug(
                f"Sliced data: {len(sliced_values)} points from index "
                f"{start_idx} to {end_idx}"
            )

            if not sliced_values:
                logger.warning("No data in the specified time range")
                return []

            # Now resample the sliced data to the new resolution
            return self._resample_sliced_data(sliced_values, len(new_periods))

        except Exception as e:
            logger.error(
                f"Failed to resample timeseries with time mapping: {e}", exc_info=True
            )
            # Return empty list rather than failing the entire operation
            return []

    def _resample_sliced_data(
        self, sliced_values: List[float], target_length: int
    ) -> List[float]:
        """
        Resample already time-sliced data to target length.

        For downsampling (fewer periods): Use mean aggregation
        For upsampling (more periods): Use interpolation
        For same length: Return as-is
        """

        if not sliced_values:
            return []

        try:
            original_length = len(sliced_values)

            if original_length == target_length:
                # Same length, return as-is
                return sliced_values
            elif original_length > target_length:
                # Downsample using mean aggregation for better accuracy
                return self._downsample_with_mean(sliced_values, target_length)
            else:
                # Upsample using linear interpolation
                return self._upsample_with_interpolation(sliced_values, target_length)

        except Exception as e:
            logger.error(f"Failed to resample sliced data: {e}", exc_info=True)
            return []

    def _downsample_with_mean(
        self, values: List[float], target_length: int
    ) -> List[float]:
        """Downsample using mean aggregation for better accuracy than simple sampling."""
        if target_length >= len(values):
            return values

        # Calculate how many original points to average for each new point
        chunk_size = len(values) / target_length
        resampled = []

        for i in range(target_length):
            start_idx = int(i * chunk_size)
            end_idx = int((i + 1) * chunk_size)

            # Handle the last chunk to include any remaining values
            if i == target_length - 1:
                end_idx = len(values)

            # Calculate mean of the chunk
            chunk_values = values[start_idx:end_idx]
            if chunk_values:
                mean_value = sum(chunk_values) / len(chunk_values)
                resampled.append(mean_value)
            else:
                # Fallback to last known value
                resampled.append(values[start_idx] if start_idx < len(values) else 0.0)

        return resampled

    def _upsample_with_interpolation(
        self, values: List[float], target_length: int
    ) -> List[float]:
        """Upsample using linear interpolation for smoother results."""
        if target_length <= len(values):
            return values[:target_length]

        # Use numpy for efficient interpolation
        original_indices = np.linspace(0, len(values) - 1, len(values))
        target_indices = np.linspace(0, len(values) - 1, target_length)

        # Perform linear interpolation
        interpolated = np.interp(target_indices, original_indices, values)

        return interpolated.tolist()
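As a quick sanity check on the resampling helpers above: the downsampler averages equal chunks of the sliced values, and the upsampler interpolates linearly between them. A small worked example that re-implements the same arithmetic standalone (not an import from the package):

import numpy as np

values = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
target_length = 3
chunk_size = len(values) / target_length  # 2.0

# Mirror _downsample_with_mean: average each chunk of the original values.
down = []
for i in range(target_length):
    chunk = values[int(i * chunk_size):int((i + 1) * chunk_size)]
    down.append(sum(chunk) / len(chunk))
print(down)  # [1.5, 3.5, 5.5]

# Mirror _upsample_with_interpolation: linear interpolation via np.interp.
up = np.interp(np.linspace(0, 1, 5), np.linspace(0, 1, 2), [0.0, 10.0])
print(up.tolist())  # [0.0, 2.5, 5.0, 7.5, 10.0]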
{pyconvexity-0.5.0.dist-info → pyconvexity-0.5.1.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
-pyconvexity/__init__.py,sha256=
-pyconvexity/_version.py,sha256=
+pyconvexity/__init__.py,sha256=Prol-EntlU_jWLR3D55ZRYqkLnenLnt5cXVc_NT1cI4,5934
+pyconvexity/_version.py,sha256=eZ1bOun1DDVV0YLOBW4wj2FP1ajReLjbIrGmzN7ASBw,22
 pyconvexity/dashboard.py,sha256=7x04Hr-EwzTAf-YJdHzfV83Gf2etltwtzwh_bCYJ5lk,8579
 pyconvexity/timeseries.py,sha256=QdKbiqjAlxkJATyKm2Kelx1Ea2PsAnnCYfVLU5VER1Y,11085
 pyconvexity/core/__init__.py,sha256=gdyyHNqOc4h9Nfe9u6NA936GNzH6coGNCMgBvvvOnGE,1196
@@ -34,11 +34,14 @@ pyconvexity/solvers/pypsa/batch_loader.py,sha256=ZgOcZqMnMS3TOYTq2Ly2O4cuwhNNAic
 pyconvexity/solvers/pypsa/builder.py,sha256=1ZU68Wtl_jQSXHzspKQDkR6bxAVU1nKvPfnPUl0aO3k,23256
 pyconvexity/solvers/pypsa/clearing_price.py,sha256=HdAk7GPfJFVI4t6mL0zQGEOMAvuyfpl0yNCnah1ZGH0,29164
 pyconvexity/solvers/pypsa/constraints.py,sha256=20WliFDhPQGMAsS4VOTU8LZJpsFpLVRHpNsZW49GTcc,16397
-pyconvexity/solvers/pypsa/solver.py,sha256=
+pyconvexity/solvers/pypsa/solver.py,sha256=pNI9ch0vO5q-8mWc3RHTscWB_ymj4s2lVJQ_e2nbzHY,44417
 pyconvexity/solvers/pypsa/storage.py,sha256=nbONOBnunq3tyexa5yDUsT9xdxieUfrqhoM76_2HIGg,94956
+pyconvexity/transformations/__init__.py,sha256=JfTk0b2O3KM22Dcem2ZnNvDDBmlmqS2X3Q_cO0H3r44,406
+pyconvexity/transformations/api.py,sha256=t_kAAk9QSF1YTlrTM7BECd_v08jUgXVV6e9iX2M0aAg,3694
+pyconvexity/transformations/time_axis.py,sha256=VyQPp09PyIr7IlxoKPeZCMkHPKPcIhI9ap_6kCyzjyk,28362
 pyconvexity/validation/__init__.py,sha256=VJNZlFoWABsWwUKktNk2jbtXIepH5omvC0WtsTS7o3o,583
 pyconvexity/validation/rules.py,sha256=GiNadc8hvbWBr09vUkGiLLTmSdvtNSeGLFwvCjlikYY,9241
-pyconvexity-0.5.
-pyconvexity-0.5.
-pyconvexity-0.5.
-pyconvexity-0.5.
+pyconvexity-0.5.1.dist-info/METADATA,sha256=DUDViqyeOCBaSihifxoHTu7P4tjBms1JU90L3jGdbxc,4967
+pyconvexity-0.5.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+pyconvexity-0.5.1.dist-info/top_level.txt,sha256=wFPEDXVaebR3JO5Tt3HNse-ws5aROCcxEco15d6j64s,12
+pyconvexity-0.5.1.dist-info/RECORD,,
{pyconvexity-0.5.0.dist-info → pyconvexity-0.5.1.dist-info}/WHEEL
File without changes
{pyconvexity-0.5.0.dist-info → pyconvexity-0.5.1.dist-info}/top_level.txt
File without changes