wings-quantum 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- wings/__init__.py +251 -0
- wings/adam.py +132 -0
- wings/ansatz.py +207 -0
- wings/benchmarks.py +605 -0
- wings/campaign.py +661 -0
- wings/cli.py +377 -0
- wings/compat.py +132 -0
- wings/config.py +443 -0
- wings/convenience.py +259 -0
- wings/evaluators/__init__.py +19 -0
- wings/evaluators/cpu.py +72 -0
- wings/evaluators/custatevec.py +783 -0
- wings/evaluators/gpu.py +220 -0
- wings/export.py +243 -0
- wings/optimizer.py +1898 -0
- wings/paths.py +295 -0
- wings/py.typed +2 -0
- wings/results.py +255 -0
- wings/types.py +14 -0
- wings_quantum-0.1.0.dist-info/METADATA +491 -0
- wings_quantum-0.1.0.dist-info/RECORD +25 -0
- wings_quantum-0.1.0.dist-info/WHEEL +5 -0
- wings_quantum-0.1.0.dist-info/entry_points.txt +2 -0
- wings_quantum-0.1.0.dist-info/licenses/LICENSE.txt +21 -0
- wings_quantum-0.1.0.dist-info/top_level.txt +1 -0
wings/paths.py
ADDED
|
@@ -0,0 +1,295 @@
|
|
|
1
|
+
import getpass
|
|
2
|
+
import os
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Optional
|
|
5
|
+
|
|
6
|
+
__all__ = [
|
|
7
|
+
"PathConfig",
|
|
8
|
+
"get_path_config",
|
|
9
|
+
"LIB_DIR",
|
|
10
|
+
"CACHE_DIR",
|
|
11
|
+
"OUTPUT_DIR",
|
|
12
|
+
"DATA_DIR",
|
|
13
|
+
"CHECKPOINT_DIR",
|
|
14
|
+
"CAMPAIGN_DIR",
|
|
15
|
+
]
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class PathConfig:
    """
    Portable path configuration that works across HPC, local, and cloud systems.

    Resolution order for each path:
    1. Environment variable (if set)
    2. HPC-specific path (if exists)
    3. Cross-platform default (always works)

    Environment variables:
    GSO_LIB_DIR - Library/dependency storage
    GSO_CACHE_DIR - Coefficient cache
    GSO_OUTPUT_DIR - Simulation outputs
    GSO_DATA_DIR - Simulation data
    GSO_CHECKPOINT_DIR - Optimization checkpoints
    GSO_CAMPAIGN_DIR - Campaign results
    GSO_BASE_DIR - Override base directory for all paths

    Usage:
    paths = PathConfig()
    print(paths.cache_dir)  # Returns appropriate path for current system

    # Or with custom base:
    paths = PathConfig(base_dir="/custom/path")
    """

    def __init__(self, base_dir: Optional[str] = None, verbose: bool = True):
        """Resolve the base directory; individual paths resolve lazily.

        Args:
            base_dir: Explicit base directory; takes precedence over
                GSO_BASE_DIR and HPC scratch detection.
            verbose: When True, print which directories were detected/created.
        """
        self._verbose = verbose
        self._username = getpass.getuser()

        # Determine base directory
        self._base_dir = self._resolve_base_dir(base_dir)

        # Initialize all paths; each stays None until _ensure_initialized runs.
        self._lib_dir: Optional[Path] = None
        self._cache_dir: Optional[Path] = None
        self._output_dir: Optional[Path] = None
        self._data_dir: Optional[Path] = None
        self._checkpoint_dir: Optional[Path] = None
        self._campaign_dir: Optional[Path] = None

        # Lazily initialized: directories are only created on first property access.
        self._initialized = False

    def _resolve_base_dir(self, override: Optional[str]) -> Path:
        """Determine the base directory for all paths.

        Precedence: explicit override > GSO_BASE_DIR > first existing HPC
        scratch directory > ~/.wings fallback.
        """
        # 1. Explicit override
        if override is not None:
            return Path(override)

        # 2. Environment variable
        env_base = os.environ.get("GSO_BASE_DIR")
        if env_base:
            return Path(env_base)

        # 3. HPC scratch directory (common patterns)
        hpc_scratch_patterns = [
            f"/scratch/{self._username}",  # SLURM standard
            f"/scratch/users/{self._username}",  # Some clusters
            f"/work/{self._username}",  # PBS/Torque
            f"/gpfs/scratch/{self._username}",  # GPFS-based
            f"/lustre/scratch/{self._username}",  # Lustre-based
            os.environ.get("SCRATCH", ""),  # $SCRATCH env var
            os.environ.get("WORK", ""),  # $WORK env var
        ]

        # The truthiness check skips the empty-string entries produced when
        # $SCRATCH / $WORK are unset; first existing candidate wins.
        for pattern in hpc_scratch_patterns:
            if pattern and Path(pattern).exists():
                if self._verbose:
                    print(f"Detected HPC scratch directory: {pattern}")
                return Path(pattern)

        # 4. Cross-platform default: user's home directory
        home = Path.home()
        default_base = home / ".wings"

        if self._verbose:
            print(f"Using default base directory: {default_base}")

        return default_base

    def _get_path(
        self, env_var: str, subdir: str, hpc_alternatives: Optional[list[str]] = None
    ) -> Path:
        """
        Resolve a path with environment override and HPC detection.

        Args:
            env_var: Environment variable name to check
            subdir: Subdirectory name under base_dir
            hpc_alternatives: Alternative HPC paths to check; "{username}"
                placeholders are substituted with the current user.

        Returns:
            The resolved path (not created here; creation happens in
            _ensure_initialized).
        """
        # 1. Environment variable override
        env_value = os.environ.get(env_var)
        if env_value:
            return Path(env_value)

        # 2. Check HPC alternatives. Note: only the PARENT must exist — the
        # alternative itself is created later by _ensure_initialized.
        if hpc_alternatives:
            for alt in hpc_alternatives:
                alt_path = Path(alt.format(username=self._username))
                if alt_path.parent.exists():
                    return alt_path

        # 3. Default: subdirectory of base
        return self._base_dir / subdir

    def _ensure_initialized(self) -> None:
        """Lazily initialize and create all directories.

        Idempotent: resolves all six paths once, then mkdir -p's each of
        them. Runs on first access of any path property.
        """
        if self._initialized:
            return

        # Resolve all paths
        self._lib_dir = self._get_path("GSO_LIB_DIR", "lib", ["/home/{username}/lib"])

        self._cache_dir = self._get_path(
            "GSO_CACHE_DIR", "cache/coefficients", ["/scratch/{username}/coefficient_cache"]
        )

        self._output_dir = self._get_path(
            "GSO_OUTPUT_DIR", "output", ["/scratch/{username}/simulation_output"]
        )

        self._data_dir = self._get_path(
            "GSO_DATA_DIR", "data", ["/scratch/{username}/simulation_data"]
        )

        self._checkpoint_dir = self._get_path(
            "GSO_CHECKPOINT_DIR", "checkpoints", ["/scratch/{username}/optimization_checkpoints"]
        )

        self._campaign_dir = self._get_path(
            "GSO_CAMPAIGN_DIR", "campaigns", ["/scratch/{username}/optimization_campaigns"]
        )

        # Create all directories
        all_dirs = [
            self._lib_dir,
            self._cache_dir,
            self._output_dir,
            self._data_dir,
            self._checkpoint_dir,
            self._campaign_dir,
        ]

        for directory in all_dirs:
            try:
                directory.mkdir(parents=True, exist_ok=True)
                if self._verbose:
                    print(f"Created/verified directory: {directory}")
            # NOTE(review): only PermissionError is handled; other OSErrors
            # (e.g. read-only filesystem, quota) will propagate — confirm
            # whether that is intended.
            except PermissionError as e:
                print(f"Warning: Cannot create {directory}: {e}")
                print(f" Set {self._env_var_for_path(directory)} to override")

        self._initialized = True

    def _env_var_for_path(self, path: Path) -> str:
        """Get the environment variable name for a path (for error messages).

        Keys are the resolved Path objects; if two directories resolve to the
        same path the first matching entry wins. Unknown paths fall back to
        GSO_BASE_DIR.
        """
        mapping = {
            self._lib_dir: "GSO_LIB_DIR",
            self._cache_dir: "GSO_CACHE_DIR",
            self._output_dir: "GSO_OUTPUT_DIR",
            self._data_dir: "GSO_DATA_DIR",
            self._checkpoint_dir: "GSO_CHECKPOINT_DIR",
            self._campaign_dir: "GSO_CAMPAIGN_DIR",
        }
        return mapping.get(path, "GSO_BASE_DIR")

    @property
    def lib_dir(self) -> Path:
        """Library/dependency storage directory (created on first access)."""
        self._ensure_initialized()
        return self._lib_dir

    @property
    def cache_dir(self) -> Path:
        """Coefficient cache directory (created on first access)."""
        self._ensure_initialized()
        return self._cache_dir

    @property
    def output_dir(self) -> Path:
        """Simulation output directory (created on first access)."""
        self._ensure_initialized()
        return self._output_dir

    @property
    def data_dir(self) -> Path:
        """Simulation data directory (created on first access)."""
        self._ensure_initialized()
        return self._data_dir

    @property
    def checkpoint_dir(self) -> Path:
        """Optimization checkpoint directory (created on first access)."""
        self._ensure_initialized()
        return self._checkpoint_dir

    @property
    def campaign_dir(self) -> Path:
        """Campaign results directory (created on first access)."""
        self._ensure_initialized()
        return self._campaign_dir

    def summary(self) -> str:
        """Return a summary of all configured paths."""
        self._ensure_initialized()
        lines = [
            "Path Configuration:",
            f" Base: {self._base_dir}",
            f" Library: {self._lib_dir}",
            f" Cache: {self._cache_dir}",
            f" Output: {self._output_dir}",
            f" Data: {self._data_dir}",
            f" Checkpoints: {self._checkpoint_dir}",
            f" Campaigns: {self._campaign_dir}",
        ]
        return "\n".join(lines)
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
# Initialize global path configuration
|
|
233
|
+
# This replaces the old hardcoded paths
|
|
234
|
+
# Process-wide singleton; populated on the first get_path_config() call.
_path_config: Optional[PathConfig] = None


def get_path_config(base_dir: Optional[str] = None, verbose: bool = True) -> PathConfig:
    """
    Get or create the global path configuration.

    Call with base_dir to override on first use:
        paths = get_path_config(base_dir="/my/custom/path")

    Subsequent calls return the same instance; base_dir/verbose arguments
    are only consulted when the singleton is first constructed.
    """
    global _path_config
    if _path_config is not None:
        return _path_config
    _path_config = PathConfig(base_dir=base_dir, verbose=verbose)
    return _path_config
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
# For backward compatibility, create module-level path variables
|
|
254
|
+
# These are now properties that resolve lazily
|
|
255
|
+
class _LazyPath:
|
|
256
|
+
"""Descriptor for lazy path resolution with backward compatibility."""
|
|
257
|
+
|
|
258
|
+
def __init__(self, attr_name: str):
|
|
259
|
+
self.attr_name = attr_name
|
|
260
|
+
|
|
261
|
+
def __get__(self, obj, objtype=None) -> Path:
|
|
262
|
+
return getattr(get_path_config(verbose=False), self.attr_name)
|
|
263
|
+
|
|
264
|
+
|
|
265
|
+
class _PathNamespace:
    """Namespace providing backward-compatible path access."""

    # Each attribute is a _LazyPath descriptor: reading it resolves the
    # corresponding directory through the global PathConfig on demand.
    lib_dir = _LazyPath("lib_dir")
    cache_dir = _LazyPath("cache_dir")
    output_dir = _LazyPath("output_dir")
    data_dir = _LazyPath("data_dir")
    checkpoint_dir = _LazyPath("checkpoint_dir")
    campaign_dir = _LazyPath("campaign_dir")
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
# Shared namespace instance for lazy attribute-style access to the paths.
_paths = _PathNamespace()

# Backward-compatible module-level constants.
#
# BUG FIX: the previous revision first bound these names to bare
# ``property(...)`` objects at module level. Properties only have an effect
# as class attributes, so those six bindings were inert objects that were
# immediately overwritten below — pure dead code, now removed.
#
# Resolving the paths here means all directories are detected and created as
# a side effect of importing this module (PathConfig._ensure_initialized runs
# on first property access). Callers that want lazy behavior should use
# get_path_config() or the ``_paths`` namespace instead.
_pc = get_path_config(verbose=False)
LIB_DIR = _pc.lib_dir
CACHE_DIR = _pc.cache_dir
OUTPUT_DIR = _pc.output_dir
DATA_DIR = _pc.data_dir
CHECKPOINT_DIR = _pc.checkpoint_dir
CAMPAIGN_DIR = _pc.campaign_dir
|
wings/py.typed
ADDED
wings/results.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
1
|
+
"""Result classes for optimization campaigns."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
import pickle
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from typing import Any, Optional
|
|
8
|
+
|
|
9
|
+
import numpy as np
|
|
10
|
+
from numpy.typing import NDArray
|
|
11
|
+
|
|
12
|
+
from .config import CampaignConfig
|
|
13
|
+
|
|
14
|
+
__all__ = [
|
|
15
|
+
"RunResult",
|
|
16
|
+
"CampaignResults",
|
|
17
|
+
]
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass
class RunResult:
    """Outcome of a single optimization run within a campaign.

    Fields:
        run_id: Sequential identifier of the run within the campaign.
        fidelity / infidelity: Final objective values (infidelity = 1 - F).
        params: Optimized parameter vector; may be None for failed runs.
        circuit_std / circuit_mean: Circuit statistics; may be None at
            runtime despite the float annotation (see to_dict guards).
        time_seconds: Wall-clock duration of the run.
        n_evaluations: Number of objective evaluations consumed.
        strategy: Name of the initialization/optimization strategy used.
        seed: RNG seed used for the run.
        success: Whether the run completed without error.
        error_message: Failure description when success is False.
    """

    run_id: int
    fidelity: float
    infidelity: float
    params: NDArray[np.float64]
    circuit_std: float
    circuit_mean: float
    time_seconds: float
    n_evaluations: int
    strategy: str
    seed: int
    success: bool
    error_message: Optional[str] = None

    def to_dict(self) -> dict:
        """Convert to serializable dictionary"""
        return {
            "run_id": self.run_id,
            "strategy": self.strategy,
            "seed": self.seed,
            "fidelity": float(self.fidelity),
            "infidelity": float(self.infidelity),
            "params": self.params.tolist() if self.params is not None else None,
            # BUG FIX: previously used truthiness (`if self.circuit_std`),
            # which silently serialized a legitimate value of 0.0 as None.
            # Only None itself should be passed through unchanged.
            "circuit_std": float(self.circuit_std) if self.circuit_std is not None else None,
            "circuit_mean": float(self.circuit_mean) if self.circuit_mean is not None else None,
            "n_evaluations": self.n_evaluations,
            "time_seconds": float(self.time_seconds),
            "success": self.success,
            "error_message": self.error_message,
        }

    @classmethod
    def from_dict(cls, d: dict) -> "RunResult":
        """Create from dictionary (inverse of to_dict; params -> ndarray)."""
        d = d.copy()
        if d.get("params") is not None:
            d["params"] = np.array(d["params"])
        return cls(**d)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class CampaignResults:
    """
    Aggregates and analyzes results from optimization campaign.

    Tracks:
    - All run results
    - Best results found
    - Statistics across runs
    - Convergence analysis
    """

    def __init__(self, config: CampaignConfig) -> None:
        self.config: CampaignConfig = config
        # Every result ever added (successful and failed).
        self.results: list[RunResult] = []
        # Sorted (descending fidelity) list of the best N successful results.
        self._top_results: list[RunResult] = []
        self.best_result: Optional[RunResult] = None
        # NOTE: completed_runs counts every added result, including failures;
        # failed_runs counts the subset with success=False (see add_result).
        self.completed_runs: int = 0
        self.failed_runs: int = 0
        self.start_time: Optional[float] = None
        self.end_time: Optional[float] = None

        # Track top N results
        self._top_n = config.save_top_n_results

    def add_result(self, result: RunResult) -> None:
        """Add a run result and update tracking"""
        self.results.append(result)
        self.completed_runs += 1

        # Failed runs are recorded but excluded from best/top tracking.
        if not result.success:
            self.failed_runs += 1
            return

        # Update best result
        if self.best_result is None or result.fidelity > self.best_result.fidelity:
            self.best_result = result
            if self.config.verbose >= 1:
                print(
                    f" New best: Run {result.run_id}, F={result.fidelity:.12f}, "
                    f"1-F={result.infidelity:.3e}"
                )

        # Update top N
        self._update_top_results(result)

    def _update_top_results(self, result: RunResult) -> None:
        """Maintain sorted list of top N results"""
        self._top_results.append(result)
        self._top_results.sort(key=lambda x: x.fidelity, reverse=True)
        if len(self._top_results) > self._top_n:
            self._top_results = self._top_results[: self._top_n]

    def get_top_results(self, n: Optional[int] = None) -> list[RunResult]:
        """Get top N results by fidelity (defaults to config.save_top_n_results)."""
        if n is None:
            n = self._top_n
        return self._top_results[:n]

    def get_statistics(self) -> dict[str, Any]:
        """Compute statistics across all successful runs.

        Returns {"error": ...} when there are no successful runs; callers
        (e.g. print_summary) should check for that key.
        """
        successful = [r for r in self.results if r.success]

        if not successful:
            return {"error": "No successful runs"}

        fidelities = np.array([r.fidelity for r in successful])
        infidelities = 1 - fidelities
        times = np.array([r.time_seconds for r in successful])
        evals = np.array([r.n_evaluations for r in successful])

        # Strategy breakdown: per-strategy count/best/mean/std over the
        # strategies declared in the campaign config.
        strategy_stats = {}
        for strategy in self.config.strategy_weights:
            strat_results = [r for r in successful if r.strategy == strategy]
            if strat_results:
                strat_fids = [r.fidelity for r in strat_results]
                strategy_stats[strategy] = {
                    "count": len(strat_results),
                    "best_fidelity": max(strat_fids),
                    "mean_fidelity": np.mean(strat_fids),
                    "std_fidelity": np.std(strat_fids),
                }

        return {
            "total_runs": len(self.results),
            "successful_runs": len(successful),
            "failed_runs": self.failed_runs,
            "success_rate": len(successful) / len(self.results) if self.results else 0,
            "best_fidelity": float(np.max(fidelities)),
            "best_infidelity": float(np.min(infidelities)),
            "mean_fidelity": float(np.mean(fidelities)),
            "std_fidelity": float(np.std(fidelities)),
            "median_fidelity": float(np.median(fidelities)),
            "fidelity_percentiles": {
                "50": float(np.percentile(fidelities, 50)),
                "90": float(np.percentile(fidelities, 90)),
                "95": float(np.percentile(fidelities, 95)),
                "99": float(np.percentile(fidelities, 99)),
                "99.9": float(np.percentile(fidelities, 99.9)),
            },
            "total_time_seconds": float(np.sum(times)),
            "mean_time_per_run": float(np.mean(times)),
            "total_evaluations": int(np.sum(evals)),
            "mean_evaluations_per_run": float(np.mean(evals)),
            "strategy_breakdown": strategy_stats,
            "target_achieved": self.best_result.fidelity >= self.config.target_fidelity
            if self.best_result
            else False,
            "runs_above_target": int(np.sum(fidelities >= self.config.target_fidelity)),
            "runs_above_acceptable": int(np.sum(infidelities <= self.config.acceptable_infidelity)),
        }

    def print_summary(self) -> None:
        """Print human-readable summary.

        NOTE(review): assumes at least one successful run; with none,
        get_statistics returns {"error": ...} and the lookups below raise
        KeyError — confirm callers guard against that.
        """
        stats = self.get_statistics()

        print("\n" + "=" * 80)
        print("CAMPAIGN RESULTS SUMMARY")
        print("=" * 80)

        print(
            f"\nRuns: {stats['successful_runs']}/{stats['total_runs']} successful "
            f"({stats['success_rate'] * 100:.1f}%)"
        )

        print("\nBest Result:")
        print(f" Fidelity: {stats['best_fidelity']:.15f}")
        print(f" Infidelity: {stats['best_infidelity']:.3e}")
        print(f" Target: {self.config.target_infidelity:.0e}")
        print(f" Achieved: {' YES' if stats['target_achieved'] else ' NO'}")

        print("\nFidelity Distribution:")
        print(f" Mean: {stats['mean_fidelity']:.10f}")
        print(f" Std: {stats['std_fidelity']:.3e}")
        print(f" Median: {stats['median_fidelity']:.10f}")

        print("\nFidelity Percentiles:")
        for pct, val in stats["fidelity_percentiles"].items():
            print(f" {pct}%:".ljust(10) + f"{val:.12f}")

        print("\nRuns Meeting Targets:")
        print(
            f" Above target ({self.config.target_infidelity:.0e}): {stats['runs_above_target']}"
        )
        print(
            f" Above acceptable ({self.config.acceptable_infidelity:.0e}): {stats['runs_above_acceptable']}"
        )

        print("\nComputation:")
        print(f" Total time: {stats['total_time_seconds'] / 3600:.2f} hours")
        print(f" Mean time/run: {stats['mean_time_per_run']:.1f}s")
        print(f" Total evaluations: {stats['total_evaluations']:,}")

        print("\nStrategy Performance:")
        for strat, strat_stats in stats["strategy_breakdown"].items():
            print(f" {strat}:")
            print(
                f" Runs: {strat_stats['count']}, "
                f"Best: {strat_stats['best_fidelity']:.10f}, "
                f"Mean: {strat_stats['mean_fidelity']:.8f}"
            )

        print("=" * 80)

    def save(self, filepath: Optional[str] = None) -> str:
        """Save results to disk.

        Writes three artifacts next to *filepath* (default:
        <config.output_dir>/campaign_results.pkl):
        - full pickle of this object
        - *_summary.json (config + statistics + top results, no raw params)
        - *_best_params.npy (best parameter vector, if any)

        Returns the pickle path.
        """
        if filepath is None:
            filepath = os.path.join(self.config.output_dir, "campaign_results.pkl")

        # Save full pickle
        with open(filepath, "wb") as f:
            pickle.dump(self, f)

        # Save JSON summary (without params to save space)
        json_path = filepath.replace(".pkl", "_summary.json")
        summary = {
            "config": self.config.to_dict(),
            "statistics": self.get_statistics(),
            "best_result": self.best_result.to_dict() if self.best_result else None,
            "top_results": [r.to_dict() for r in self._top_results],
        }
        # default=str stringifies anything json can't encode (e.g. numpy scalars).
        with open(json_path, "w") as f:
            json.dump(summary, f, indent=2, default=str)

        # Save best params as numpy
        if self.best_result is not None:
            np.save(filepath.replace(".pkl", "_best_params.npy"), self.best_result.params)

        return filepath

    @classmethod
    def load(cls, filepath: str) -> "CampaignResults":
        """Load results from disk.

        SECURITY NOTE: pickle.load executes arbitrary code from the file;
        only load files this process (or a trusted one) wrote via save().
        """
        with open(filepath, "rb") as f:
            return pickle.load(f)
|
wings/types.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from numpy.typing import NDArray
|
|
3
|
+
|
|
4
|
+
# Type aliases for clarity
ComplexArray = NDArray[np.complex128]  # complex-valued arrays (e.g. state vectors)
FloatArray = NDArray[np.float64]  # generic real-valued arrays
ParameterArray = NDArray[np.float64]  # real parameter vectors (same dtype as FloatArray)

# Public API of this module; NDArray is re-exported for convenience.
__all__ = [
    "ComplexArray",
    "FloatArray",
    "ParameterArray",
    "NDArray",
]
|