wings-quantum 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
wings/campaign.py ADDED
@@ -0,0 +1,661 @@
+ """Campaign management and convenience functions."""
+
+ import glob
+ import json
+ import logging
+ import os
+ import pickle
+ import time
+ from concurrent.futures import ProcessPoolExecutor
+ from datetime import datetime
+ from typing import Any, Optional
+
+ import numpy as np
+
+ from .config import CampaignConfig, OptimizationPipeline, OptimizerConfig, TargetFunction
+ from .optimizer import GaussianOptimizer
+ from .paths import get_path_config
+ from .results import CampaignResults, RunResult
+ from .types import ParameterArray
+
+ logger = logging.getLogger(__name__)
+
+ __all__ = [
+     "OptimizationManager",
+     "run_production_campaign",
+     "quick_optimization",
+     "load_campaign_results",
+     "list_campaigns",
+ ]
+
+
+ class OptimizationManager:
+     """
+     Production manager for large-scale optimization campaigns.
+
+     Features:
+     - Run thousands of optimizations
+     - Automatic checkpointing and resume
+     - Parallel execution with GPU support
+     - Result aggregation and analysis
+     - Progress tracking and logging
+
+     Example:
+         config = CampaignConfig(n_qubits=8, sigma=0.5, total_runs=1000)
+         manager = OptimizationManager(config)
+         results = manager.run_campaign()
+         results.print_summary()
+     """
+
+     def __init__(self, config: CampaignConfig) -> None:
+         self.config: CampaignConfig = config
+         self.results: CampaignResults = CampaignResults(config)
+         # Checkpoint tracking
+         self._checkpoint_file: str = os.path.join(config.checkpoint_dir, "checkpoint.pkl")
+         self._completed_runs: set[int] = set()
+
+         # Logging
+         self._log_file = os.path.join(config.output_dir, "campaign.log")
+
+         # Resume from checkpoint if requested
+         if config.resume_from_checkpoint:
+             self._try_resume()
+
+         # Save config
+         config.save()
+
+         self._log(f"OptimizationManager initialized for campaign: {config.campaign_name}")
+         self._log(f" Total runs: {config.total_runs}")
+         self._log(f" Target infidelity: {config.target_infidelity:.0e}")
+         self._log(f" Output directory: {config.output_dir}")
+
+     def _log(self, message: str, level: int = 1) -> None:
+         """Log message to file and optionally console"""
+         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+         log_entry = f"[{timestamp}] {message}"
+
+         # Write to file
+         with open(self._log_file, "a") as f:
+             f.write(log_entry + "\n")
+
+         # Print if verbose enough
+         if self.config.verbose >= level:
+             print(message)
+
+     def _try_resume(self) -> None:
+         """Attempt to resume from checkpoint"""
+         if not os.path.exists(self._checkpoint_file):
+             self._log("No checkpoint found, starting fresh")
+             return
+
+         try:
+             with open(self._checkpoint_file, "rb") as f:
+                 checkpoint = pickle.load(f)
+
+             self.results = checkpoint["results"]
+             self._completed_runs = checkpoint["completed_runs"]
+
+             self._log(f"Resumed from checkpoint: {len(self._completed_runs)} runs completed")
+             self._log(
+                 f" Best fidelity so far: {self.results.best_result.fidelity:.12f}"
+                 if self.results.best_result
+                 else " No successful runs yet"
+             )
+
+         except OSError as e:  # FileNotFoundError is a subclass of OSError
+             self._log(f"Could not read checkpoint file: {e}")
+             self._completed_runs = set()
+         except (pickle.UnpicklingError, KeyError, EOFError) as e:
+             self._log(f"Checkpoint file corrupted: {e}")
+             self._log("Starting fresh")
+             self._completed_runs = set()
+         except Exception as e:
+             # Catch-all for truly unexpected errors, but log the type
+             self._log(f"Unexpected error resuming checkpoint ({type(e).__name__}): {e}")
+             self._completed_runs = set()
+
+     def _save_checkpoint(self) -> None:
+         """Save checkpoint to disk"""
+         checkpoint = {
+             "results": self.results,
+             "completed_runs": self._completed_runs,
+             "timestamp": datetime.now().isoformat(),
+         }
+
+         # Write to temp file first, then rename (atomic)
+         temp_file = self._checkpoint_file + ".tmp"
+         with open(temp_file, "wb") as f:
+             pickle.dump(checkpoint, f)
+
+         os.replace(temp_file, self._checkpoint_file)
+
+         # Also save results
+         self.results.save()
+
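Since the checkpoint is a plain pickle holding exactly the three keys built above, it can be inspected offline between batches. A minimal sketch, assuming a hypothetical path under `config.checkpoint_dir`:

```python
import pickle

# Hypothetical path; real checkpoints live at <config.checkpoint_dir>/checkpoint.pkl
with open("campaigns/my_campaign/checkpoint.pkl", "rb") as f:
    checkpoint = pickle.load(f)

print(checkpoint["timestamp"])            # ISO-8601 time of the last save
print(len(checkpoint["completed_runs"]))  # run IDs already finished
results = checkpoint["results"]           # the pickled CampaignResults object
```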
+     def _create_optimizer_config(self) -> OptimizerConfig:
+         """Create OptimizerConfig from CampaignConfig"""
+         return OptimizerConfig(
+             n_qubits=self.config.n_qubits,
+             sigma=self.config.sigma,
+             x0=self.config.x0,
+             box_size=self.config.box_size,
+             # Target function settings
+             target_function=self.config.target_function,
+             gamma=self.config.gamma,
+             custom_target_fn=self.config.custom_target_fn,
+             # Optimizer settings
+             method="L-BFGS-B",
+             max_iter=self.config.max_iter_per_run,
+             max_fun=self.config.max_iter_per_run * 2,
+             tolerance=self.config.tolerance_per_run,
+             gtol=self.config.tolerance_per_run,
+             high_precision=True,
+             use_analytic_gradients=True,
+             verbose=False,  # Quiet for mass runs
+             # GPU settings
+             use_gpu=self.config.use_gpu,
+             use_custatevec=self.config.use_custatevec,
+             gpu_precision=self.config.gpu_precision,
+             # Multi-GPU settings
+             use_multi_gpu=self.config.use_multi_gpu,
+             gpu_device_ids=self.config.gpu_device_ids,
+             # Other
+             parallel_gradients=False,  # Avoid nested parallelism
+             target_fidelity=self.config.target_fidelity,
+         )
+
+     def _run_single_optimization(self, run_id: int) -> RunResult:
+         """
+         Execute a single optimization run.
+
+         Returns RunResult with success/failure status.
+         """
+         strategy = self.config.get_strategy_for_run(run_id)
+         seed = self.config.get_seed_for_run(run_id)
+
+         start_time = time.time()
+
+         try:
+             # Set random seed for reproducibility
+             np.random.seed(seed)
+
+             # Create optimizer
+             opt_config = self._create_optimizer_config()
+             optimizer = GaussianOptimizer(opt_config)
+
+             # Get initial parameters based on strategy
+             if strategy == "perturb_best" and self.results.best_result is not None:
+                 # Perturb from best known result
+                 initial_params = self.results.best_result.params + 0.1 * np.random.randn(
+                     opt_config.n_params
+                 )
+             else:
+                 initial_params = optimizer.get_initial_params(strategy)
+
+             # Run optimization
+             pipeline = OptimizationPipeline(
+                 mode="ultra" if self.config.use_ultra_precision else "adaptive",
+                 target_fidelity=self.config.target_fidelity,
+                 max_total_time=self.config.ultra_precision_time_limit,
+                 use_basin_hopping=self.config.use_ultra_precision,
+                 verbose=False,
+             )
+             optimizer.run_optimization(pipeline, initial_params)
+
+             # Compute circuit statistics
+             final_psi = optimizer.get_statevector(optimizer.best_params)
+             circuit_stats = optimizer.compute_statistics(final_psi)
+
+             elapsed = time.time() - start_time
+
+             return RunResult(
+                 run_id=run_id,
+                 strategy=strategy,
+                 seed=seed,
+                 fidelity=optimizer.best_fidelity,
+                 infidelity=1 - optimizer.best_fidelity,
+                 params=optimizer.best_params.copy(),
+                 circuit_std=circuit_stats["std"],
+                 circuit_mean=circuit_stats["mean"],
+                 n_evaluations=optimizer.n_evals,
+                 time_seconds=elapsed,
+                 success=True,
+             )
+
+         except Exception as e:
+             elapsed = time.time() - start_time
+             error_msg = f"{type(e).__name__}: {e}"
+             self._log(f"Run {run_id} failed: {error_msg}", level=2)
+
+             return RunResult(
+                 run_id=run_id,
+                 strategy=strategy,
+                 seed=seed,
+                 fidelity=0.0,
+                 infidelity=1.0,
+                 params=None,
+                 circuit_std=None,
+                 circuit_mean=None,
+                 n_evaluations=0,
+                 time_seconds=elapsed,
+                 success=False,
+                 error_message=error_msg,
+             )
+
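Because the strategy and seed for each run are derived deterministically from its run ID, a single run can be replayed in isolation when debugging a failure. A sketch, assuming an existing `CampaignConfig` named `config`; note it calls a private method, so it is for debugging only:

```python
# Hypothetical replay of run 42 in isolation.
manager = OptimizationManager(config)
print(config.get_strategy_for_run(42), config.get_seed_for_run(42))

result = manager._run_single_optimization(42)
if result.success:
    print(f"F={result.fidelity:.10f} in {result.time_seconds:.1f}s")
else:
    print(f"failed: {result.error_message}")
```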
+     def _run_batch_sequential(self, run_ids: list[int]) -> list[RunResult]:
+         """Run a batch of optimizations sequentially (GPU-optimized)"""
+         results = []
+         for run_id in run_ids:
+             if run_id in self._completed_runs:
+                 continue
+
+             if self.config.verbose >= 1:
+                 print(f" Starting run {run_id}/{self.config.total_runs}...", end=" ")
+
+             result = self._run_single_optimization(run_id)
+             results.append(result)
+
+             if self.config.verbose >= 1:
+                 if result.success:
+                     print(f"F={result.fidelity:.10f}, time={result.time_seconds:.1f}s")
+                 else:
+                     print(f"FAILED: {result.error_message}")
+
+         return results
+
+     def _run_batch_parallel(self, run_ids: list[int]) -> list[RunResult]:
+         """Run a batch of optimizations in parallel (CPU mode)"""
+         # Filter out completed runs
+         run_ids = [r for r in run_ids if r not in self._completed_runs]
+
+         if not run_ids:
+             return []
+
+         results = []
+
+         # Use ProcessPoolExecutor for true parallelism
+         # Note: GPU/cuStateVec should be disabled for parallel runs
+         with ProcessPoolExecutor(max_workers=self.config.n_parallel_runs) as executor:
+             futures = {executor.submit(self._run_single_optimization, rid): rid for rid in run_ids}
+
+             for future in futures:
+                 try:
+                     result = future.result(timeout=self.config.ultra_precision_time_limit * 2)
+                     results.append(result)
+
+                     if self.config.verbose >= 1:
+                         run_id = futures[future]
+                         if result.success:
+                             print(f" Run {run_id}: F={result.fidelity:.10f}")
+                         else:
+                             print(f" Run {run_id}: FAILED")
+
+                 except Exception as e:
+                     run_id = futures[future]
+                     self._log(f"Run {run_id} exception: {e}")
+
+         return results
+
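The loop above iterates the futures dict in submission order, so a slow early run delays reporting on runs that have already finished, and each per-future timeout only starts counting when `result()` is reached. If that matters, the standard library's `as_completed` yields futures as they finish; a sketch with hypothetical `work` and `run_ids`, not the package's code:

```python
from concurrent.futures import ProcessPoolExecutor, as_completed

with ProcessPoolExecutor(max_workers=4) as executor:
    futures = {executor.submit(work, rid): rid for rid in run_ids}
    # Yields each future as soon as it completes, regardless of submission order.
    for future in as_completed(futures):
        run_id = futures[future]
        try:
            result = future.result()  # already done; returns (or raises) immediately
        except Exception as e:
            print(f"run {run_id} raised: {e}")
        else:
            print(f"run {run_id} finished: {result}")
```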
+     def run_campaign(self) -> CampaignResults:
+         """
+         Execute the full optimization campaign.
+
+         Returns:
+             CampaignResults with all run data and statistics
+         """
+         self._log("=" * 80)
+         self._log(f"STARTING OPTIMIZATION CAMPAIGN: {self.config.campaign_name}")
+         self._log("=" * 80)
+
+         self.results.start_time = time.time()
+
+         # Determine which runs still need to be done
+         all_run_ids = list(range(self.config.total_runs))
+         remaining_runs = [r for r in all_run_ids if r not in self._completed_runs]
+
+         self._log(f"Runs to complete: {len(remaining_runs)}/{self.config.total_runs}")
+
+         # Process in batches
+         batch_size = self.config.runs_per_batch
+         n_batches = (len(remaining_runs) + batch_size - 1) // batch_size
+
+         for batch_idx in range(n_batches):
+             batch_start = batch_idx * batch_size
+             batch_end = min(batch_start + batch_size, len(remaining_runs))
+             batch_run_ids = remaining_runs[batch_start:batch_end]
+
+             self._log(
+                 f"\nBatch {batch_idx + 1}/{n_batches} (runs {batch_run_ids[0]}-{batch_run_ids[-1]})"
+             )
+
+             # Run batch (sequential for GPU, parallel for CPU)
+             if self.config.use_gpu or self.config.use_custatevec:
+                 batch_results = self._run_batch_sequential(batch_run_ids)
+             else:
+                 batch_results = self._run_batch_parallel(batch_run_ids)
+
+             # Process results
+             for result in batch_results:
+                 self.results.add_result(result)
+                 self._completed_runs.add(result.run_id)
+
+             # Checkpoint
+             if (batch_idx + 1) % max(1, self.config.checkpoint_interval // batch_size) == 0:
+                 self._log("Saving checkpoint...")
+                 self._save_checkpoint()
+
+             # Check if target achieved
+             if (
+                 self.results.best_result
+                 and self.results.best_result.fidelity >= self.config.target_fidelity
+             ):
+                 self._log(f"\n TARGET ACHIEVED at run {self.results.best_result.run_id}!")
+                 self._log(f" Fidelity: {self.results.best_result.fidelity:.15f}")
+                 # Continue running to find potentially better solutions
+
+         self.results.end_time = time.time()
+
+         # Final checkpoint and save
+         self._save_checkpoint()
+
+         # Refinement phase
+         if self.config.refine_top_n > 0:
+             self._run_refinement_phase()
+
+         # Final save
+         self.results.save()
+
+         self._log("\nCampaign complete!")
+         self.results.print_summary()
+
+         return self.results
+
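Because completed run IDs are checkpointed batch by batch, an interrupted campaign can be restarted and will skip everything already done. A sketch, assuming the same `campaign_name` so the manager finds the earlier checkpoint directory:

```python
config = CampaignConfig(
    n_qubits=8,
    sigma=0.5,
    total_runs=1000,
    campaign_name="my_campaign",     # hypothetical; must match the interrupted campaign
    resume_from_checkpoint=True,     # __init__ then calls _try_resume()
)
results = OptimizationManager(config).run_campaign()  # executes only the remaining run IDs
```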
+     def _run_refinement_phase(self) -> None:
+         """Refine top N results with extended optimization"""
+         self._log("\n" + "=" * 60)
+         self._log("REFINEMENT PHASE")
+         self._log("=" * 60)
+
+         top_results = self.results.get_top_results(self.config.refine_top_n)
+
+         if not top_results:
+             self._log("No results to refine")
+             return
+
+         self._log(f"Refining top {len(top_results)} results")
+
+         # Create optimizer for refinement
+         opt_config = self._create_optimizer_config()
+         opt_config.verbose = True
+         optimizer = GaussianOptimizer(opt_config)
+
+         best_refined_fidelity = 0.0
+         best_refined_params = None
+
+         for i, result in enumerate(top_results):
+             self._log(
+                 f"\nRefining result {i + 1}/{len(top_results)} (original F={result.fidelity:.12f})"
+             )
+
+             # Reset optimizer state
+             optimizer.best_fidelity = 0.0
+             optimizer.best_params = None
+             optimizer.n_evals = 0
+
+             try:
+                 # Run ultra-precision refinement starting from this result
+                 refined = optimizer.optimize_ultra_precision(
+                     target_infidelity=self.config.target_infidelity / 10,  # Even tighter
+                     max_total_time=self.config.refinement_time_limit,
+                     initial_params=result.params,
+                 )
+
+                 self._log(
+                     f" Refined: F={refined['fidelity']:.15f} "
+                     f"(improvement: {refined['fidelity'] - result.fidelity:.3e})"
+                 )
+
+                 if refined["fidelity"] > best_refined_fidelity:
+                     best_refined_fidelity = refined["fidelity"]
+                     best_refined_params = optimizer.best_params.copy()
+
+             except Exception as e:
+                 self._log(f" Refinement failed: {e}")
+
+         # Update best result if refinement improved it
+         if best_refined_fidelity > self.results.best_result.fidelity:
+             self._log("\n* Refinement improved best result")
+             self._log(f" Before: {self.results.best_result.fidelity:.15f}")
+             self._log(f" After: {best_refined_fidelity:.15f}")
+
+             # Create new best result
+             final_psi = optimizer.get_statevector(best_refined_params)
+             circuit_stats = optimizer.compute_statistics(final_psi)
+
+             self.results.best_result = RunResult(
+                 run_id=-1,  # Refinement result
+                 strategy="refinement",
+                 seed=0,
+                 fidelity=best_refined_fidelity,
+                 infidelity=1 - best_refined_fidelity,
+                 params=best_refined_params,
+                 circuit_std=circuit_stats["std"],
+                 circuit_mean=circuit_stats["mean"],
+                 n_evaluations=optimizer.n_evals,
+                 time_seconds=0.0,
+                 success=True,
+             )
+
+     def get_best_parameters(self) -> np.ndarray:
+         """Get the best parameters found across all runs"""
+         if self.results.best_result is None:
+             raise ValueError("No successful optimization runs yet")
+         return self.results.best_result.params.copy()
+
+     def get_best_fidelity(self) -> float:
+         """Get the best fidelity achieved"""
+         if self.results.best_result is None:
+             return 0.0
+         return self.results.best_result.fidelity
+
+     def export_best_result(self, output_path: Optional[str] = None) -> str:
+         """Export best result to an .npz file"""
+         if self.results.best_result is None:
+             raise ValueError("No successful optimization runs yet")
+
+         if output_path is None:
+             output_path = os.path.join(
+                 self.config.output_dir,
+                 f"best_result_q{self.config.n_qubits}_s{self.config.sigma:.2f}.npz",
+             )
+
+         np.savez(
+             output_path,
+             params=self.results.best_result.params,
+             fidelity=self.results.best_result.fidelity,
+             infidelity=self.results.best_result.infidelity,
+             n_qubits=self.config.n_qubits,
+             sigma=self.config.sigma,
+             box_size=self.config.box_size,
+         )
+
+         self._log(f"Best result exported to: {output_path}")
+         return output_path
+
+
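The exported archive can be reloaded with plain NumPy using the keys written by `np.savez` above; a sketch with an illustrative filename:

```python
import numpy as np

data = np.load("best_result_q8_s0.50.npz")  # illustrative filename
params = data["params"]                     # best parameter vector
print(float(data["fidelity"]), int(data["n_qubits"]), float(data["sigma"]))
```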
+ def run_production_campaign(
+     n_qubits: int,
+     sigma: float,
+     total_runs: int = 1000,
+     target_infidelity: float = 1e-10,
+     box_size: Optional[float] = None,
+     x0: float = 0.0,
+     target_function: Optional[TargetFunction] = None,
+     gamma: Optional[float] = None,
+     custom_target_fn=None,
+     use_multi_gpu: bool = False,
+     gpu_device_ids: Optional[list[int]] = None,
+     campaign_name: Optional[str] = None,
+     resume: bool = True,
+     **kwargs,
+ ) -> CampaignResults:
+     """
+     Convenience function to run a production optimization campaign.
+
+     Args:
+         n_qubits: Number of qubits
+         sigma: Gaussian width
+         total_runs: Total number of optimization runs
+         target_infidelity: Target infidelity (1 - fidelity)
+         box_size: Spatial box size (optional)
+         x0: Wavefunction center position (default: 0.0)
+         target_function: Target wavefunction type (default: GAUSSIAN)
+         gamma: Width parameter for Lorentzian
+         custom_target_fn: Custom wavefunction function
+         use_multi_gpu: Enable multi-GPU acceleration
+         gpu_device_ids: Specific GPU device IDs to use
+         campaign_name: Name for campaign (auto-generated if None)
+         resume: Whether to resume from checkpoint if available
+         **kwargs: Additional arguments passed to CampaignConfig
+
+     Returns:
+         CampaignResults with all results and statistics
+
+     Example:
+         results = run_production_campaign(
+             n_qubits=8,
+             sigma=0.5,
+             total_runs=1000,
+             target_infidelity=1e-11
+         )
+         print(f"Best fidelity: {results.best_result.fidelity}")
+     """
+     # Default to GAUSSIAN if not specified
+     if target_function is None:
+         target_function = TargetFunction.GAUSSIAN
+
+     config = CampaignConfig(
+         n_qubits=n_qubits,
+         sigma=sigma,
+         x0=x0,
+         box_size=box_size,
+         target_function=target_function,
+         gamma=gamma,
+         custom_target_fn=custom_target_fn,
+         total_runs=total_runs,
+         target_infidelity=target_infidelity,
+         use_multi_gpu=use_multi_gpu,
+         gpu_device_ids=gpu_device_ids,
+         campaign_name=campaign_name,
+         resume_from_checkpoint=resume,
+         **kwargs,
+     )
+
+     manager = OptimizationManager(config)
+     results = manager.run_campaign()
+
+     return results
+
+
+ def quick_optimization(
+     n_qubits: int,
+     sigma: float,
+     x0: float = 0.0,
+     target_function: Optional[TargetFunction] = None,
+     n_runs: int = 50,
+     target_infidelity: float = 1e-8,
+     verbose: bool = True,
+ ) -> tuple[ParameterArray, float, dict[str, Any]]:
+     """
+     Quick optimization for testing or small-scale problems.
+
+     Args:
+         n_qubits: Number of qubits
+         sigma: Gaussian width
+         x0: Wavefunction center position (default: 0.0)
+         target_function: Target wavefunction type (default: GAUSSIAN)
+         n_runs: Number of optimization runs (default 50)
+         target_infidelity: Target infidelity
+         verbose: Print progress
+
+     Returns:
+         (best_params, best_fidelity, results_dict)
+     """
+     if target_function is None:
+         target_function = TargetFunction.GAUSSIAN
+
+     config = CampaignConfig(
+         n_qubits=n_qubits,
+         sigma=sigma,
+         x0=x0,
+         target_function=target_function,
+         total_runs=n_runs,
+         target_infidelity=target_infidelity,
+         runs_per_batch=10,
+         checkpoint_interval=100,  # Effectively no checkpointing
+         verbose=1 if verbose else 0,
+         use_ultra_precision=False,  # Faster
+         max_iter_per_run=2000,
+     )
+
+     manager = OptimizationManager(config)
+     results = manager.run_campaign()
+
+     if results.best_result is None:
+         raise ValueError("All optimization runs failed")
+
+     return (results.best_result.params, results.best_result.fidelity, results.get_statistics())
+
+
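A quick smoke test might look like the following sketch (arguments are illustrative):

```python
params, fidelity, stats = quick_optimization(n_qubits=6, sigma=0.5, n_runs=10)
print(f"best fidelity: {fidelity:.10f}")
print(stats)  # aggregate statistics from CampaignResults.get_statistics()
```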
+ def load_campaign_results(campaign_name_or_path: str) -> CampaignResults:
+     """
+     Load results from a previous campaign.
+
+     Args:
+         campaign_name_or_path: Campaign name or full path to results file
+
+     Returns:
+         CampaignResults object
+     """
+     if os.path.exists(campaign_name_or_path):
+         return CampaignResults.load(campaign_name_or_path)
+
+     # Try to find in campaign directory
+     campaign_dir = get_path_config(verbose=False).campaign_dir / campaign_name_or_path
+     results_file = os.path.join(campaign_dir, "campaign_results.pkl")
+
+     if os.path.exists(results_file):
+         return CampaignResults.load(results_file)
+
+     raise FileNotFoundError(f"Could not find campaign results for: {campaign_name_or_path}")
+
+
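The loader accepts either an explicit path to a results file or a campaign name resolved under the configured campaign directory; a sketch with an illustrative name:

```python
results = load_campaign_results("campaign_q8_s0.50")  # illustrative campaign name
results.print_summary()
print(results.best_result.fidelity)
```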
+ def list_campaigns() -> list[dict[str, Any]]:
+     """
+     List all available campaigns.
+
+     Returns:
+         List of dicts with campaign info
+     """
+     campaigns = []
+
+     campaign_base = get_path_config(verbose=False).campaign_dir
+     for campaign_dir in glob.glob(str(campaign_base / "campaign_*")):
+         config_file = os.path.join(campaign_dir, "campaign_config.json")
+         results_file = os.path.join(campaign_dir, "campaign_results_summary.json")
+
+         info = {
+             "name": os.path.basename(campaign_dir),
+             "path": campaign_dir,
+             "has_config": os.path.exists(config_file),
+             "has_results": os.path.exists(results_file),
+         }
+
+         if os.path.exists(results_file):
+             try:
+                 with open(results_file) as f:
+                     summary = json.load(f)
+                 info["best_fidelity"] = summary.get("statistics", {}).get("best_fidelity")
+                 info["total_runs"] = summary.get("statistics", {}).get("total_runs")
+             except (OSError, json.JSONDecodeError, KeyError) as e:
+                 logger.warning(f"Could not parse results file {results_file}: {e}")
+
+         campaigns.append(info)
+
+     return sorted(campaigns, key=lambda x: x["name"], reverse=True)
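Each entry carries the keys built above (`name`, `path`, `has_config`, `has_results`, plus `best_fidelity` and `total_runs` when a summary file was parsed), so a directory listing is straightforward; a small sketch:

```python
for info in list_campaigns():
    if info.get("best_fidelity") is not None:
        print(f"{info['name']}: F={info['best_fidelity']}  ({info.get('total_runs')} runs)")
    else:
        print(f"{info['name']}: no results yet  ({info['path']})")
```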