wings-quantum 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
wings/optimizer.py ADDED
@@ -0,0 +1,1898 @@
1
+ """Main Gaussian state optimizer."""
2
+
3
+ import copy
4
+ import json
5
+ import os
6
+ import time
7
+ from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
8
+ from datetime import datetime
9
+ from typing import Optional
10
+
11
+ import matplotlib.pyplot as plt
12
+ import numpy as np
13
+ from numpy.typing import NDArray
14
+ from qiskit import QuantumCircuit, transpile
15
+ from qiskit.circuit import ParameterVector
16
+ from qiskit.quantum_info import Statevector
17
+ from scipy.optimize import basinhopping, differential_evolution, minimize
18
+
19
+ from .adam import AdamOptimizer, AdamWithRestarts
20
+ from .ansatz import DefaultAnsatz
21
+ from .compat import HAS_CUSTATEVEC
22
+ from .config import OptimizationPipeline, OptimizerConfig, TargetFunction
23
+ from .evaluators.cpu import ThreadSafeCircuitEvaluator
24
+ from .evaluators.custatevec import (
25
+ BatchedCuStateVecEvaluator,
26
+ CuStateVecEvaluator,
27
+ MultiGPUBatchEvaluator,
28
+ )
29
+ from .evaluators.gpu import GPUCircuitEvaluator
30
+ from .types import ComplexArray, FloatArray, ParameterArray
31
+
32
+ __all__ = ["GaussianOptimizer"]
33
+
34
+
35
+ class GaussianOptimizer:
36
+ """High-precision optimizer with enhanced convergence capabilities"""
37
+
38
+ def __init__(self, config: OptimizerConfig) -> None:
39
+ self.config = config
40
+ self.n_params = config.n_params
41
+
42
+ # Get ansatz (use config's ansatz or create default)
43
+ self.ansatz = config.ansatz
44
+ if self.ansatz is None:
45
+ self.ansatz = DefaultAnsatz(config.n_qubits)
46
+
47
+ # Build circuit template using ansatz
48
+ self.param_vector = ParameterVector("theta", self.n_params)
49
+ self.circuit = self.ansatz(
50
+ self.param_vector, config.n_qubits, **(config.ansatz_kwargs or {})
51
+ )
52
+
53
+ # Compute target Gaussian with high precision
54
+ self.positions = config.positions
55
+ self.target = self._compute_target_wavefunction()
56
+ # Pre-conjugate target for faster overlap
57
+ self._target_conj = np.conj(self.target)
58
+
59
+ # Tracking
60
+ self.n_evals = 0
61
+ self.best_fidelity = 0
62
+ self.best_params = None
63
+ self.history = {"fidelity": [], "iteration": [], "gradient_norm": []}
64
+ self.last_gradient = None
65
+ self._log_interval = 500  # Log progress at most every 500 evaluations
66
+ self._last_log_time = time.time()
67
+ self._min_log_interval_sec = 2.0
68
+
69
+ self._circuit_transpiled = transpile(
70
+ self.circuit, basis_gates=["ry", "cx", "x"], optimization_level=1
71
+ )
72
+ # Pre-store parameter vector as list for faster zip
73
+ self._param_list = list(self.param_vector)
74
+
75
+ self._gpu_evaluator = None
76
+ if self.config.use_gpu:
77
+ print("\nInitializing GPU acceleration...")
78
+ self._gpu_evaluator = GPUCircuitEvaluator(self.config, self.target)
79
+ if self._gpu_evaluator.gpu_available:
80
+ print(" GPU acceleration enabled")
81
+ else:
82
+ print(" GPU not available, using CPU")
83
+
84
+ # === Multi-GPU Acceleration ===
85
+ self._multi_gpu_evaluator = None
86
+
87
+ if self.config.use_multi_gpu and HAS_CUSTATEVEC:
88
+ try:
89
+ import cupy as cp
90
+
91
+ n_gpus = cp.cuda.runtime.getDeviceCount()
92
+
93
+ if n_gpus > 1:
94
+ print(f"\nInitializing Multi-GPU acceleration ({n_gpus} GPUs available)...")
95
+ self._multi_gpu_evaluator = MultiGPUBatchEvaluator(
96
+ self.config,
97
+ self.target,
98
+ device_ids=self.config.gpu_device_ids,
99
+ simulators_per_gpu=self.config.simulators_per_gpu,
100
+ )
101
+ print(" Multi-GPU initialized")
102
+ print(f" GPUs: {self._multi_gpu_evaluator.device_ids}")
103
+ print(f" Simulators per GPU: {self.config.simulators_per_gpu}")
104
+ else:
105
+ print("\\nMulti-GPU requested but only 1 GPU available")
106
+ except Exception as e:
107
+ print(f" Multi-GPU initialization failed: {e}")
108
+ self._multi_gpu_evaluator = None
109
+ elif self.config.use_multi_gpu and not HAS_CUSTATEVEC:
110
+ print("\\nNote: Multi-GPU requires cuStateVec which is not available.")
111
+
112
113
+
114
+ # === Stage 5: cuStateVec Acceleration ===
115
+ self._custatevec_evaluator = None
116
+ self._custatevec_batch_evaluator = None
117
+
118
+ if self.config.use_custatevec and HAS_CUSTATEVEC:
119
+ print("\nInitializing cuStateVec acceleration...")
120
+ try:
121
+ self._custatevec_evaluator = CuStateVecEvaluator(self.config, self.target)
122
+ self._custatevec_batch_evaluator = BatchedCuStateVecEvaluator(
123
+ self.config, self.target, n_simulators=4
124
+ )
125
+ print(" cuStateVec initialized")
126
+ print(f" Precision: {self.config.gpu_precision}")
127
+ print(" Batch simulators: 4")
128
+ except (RuntimeError, MemoryError) as e:
129
+ print(f" cuStateVec initialization failed (GPU issue): {e}")
130
+ self._custatevec_evaluator = None
131
+ self._custatevec_batch_evaluator = None
132
+ except ImportError as e:
133
+ print(f" cuStateVec initialization failed (missing library): {e}")
134
+ self._custatevec_evaluator = None
135
+ self._custatevec_batch_evaluator = None
136
+ elif self.config.use_custatevec and not HAS_CUSTATEVEC:
137
+ print("\nNote: cuStateVec requested but not available. Using Aer GPU fallback.")
138
+
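+ # Illustrative construction sketch (hypothetical values; the actual OptimizerConfig
+ # fields and signature are defined in .config):
+ #   config = OptimizerConfig(n_qubits=6, sigma=1.0, x0=0.0, use_gpu=False, ...)
+ #   optimizer = GaussianOptimizer(config)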
139
+ def _compute_target_wavefunction(self) -> "ComplexArray":
140
+ """Compute normalized target wavefunction based on config."""
141
+ x = self.positions
142
+ dx = self.config.delta_x
143
+
144
+ if self.config.target_function == TargetFunction.GAUSSIAN:
145
+ psi = self._gaussian(x)
146
+ elif self.config.target_function == TargetFunction.LORENTZIAN:
147
+ psi = self._lorentzian(x)
148
+ elif self.config.target_function == TargetFunction.SECH:
149
+ psi = self._sech(x)
150
+ elif self.config.target_function == TargetFunction.CUSTOM:
151
+ if self.config.custom_target_fn is None:
152
+ raise ValueError("custom_target_fn required for CUSTOM target")
153
+ psi = self.config.custom_target_fn(x)
154
+ else:
155
+ raise ValueError(f"Unknown target function: {self.config.target_function}")
156
+
157
+ # Normalize
158
+ psi = psi.astype(np.complex128)
159
+ norm = np.sqrt(np.sum(np.abs(psi) ** 2) * dx)
160
+ psi = psi / norm
161
+
162
+ # Ensure unit norm
163
+ psi = psi / np.linalg.norm(psi)
164
+
165
+ return psi
166
+
167
+ def _gaussian(self, x: np.ndarray) -> np.ndarray:
168
+ """Gaussian wavefunction."""
169
+ return np.exp(-((x - self.config.x0) ** 2) / (2 * self.config.sigma**2))
170
+
171
+ def _lorentzian(self, x: np.ndarray) -> np.ndarray:
172
+ """Lorentzian (Cauchy) wavefunction."""
173
+ gamma = self.config.gamma if self.config.gamma else self.config.sigma
174
+ return gamma / ((x - self.config.x0) ** 2 + gamma**2)
175
+
176
+ def _sech(self, x: np.ndarray) -> np.ndarray:
177
+ """Hyperbolic secant wavefunction (soliton-like)."""
178
+ return 1.0 / np.cosh((x - self.config.x0) / self.config.sigma)
179
+
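+ # Illustrative sketch (hypothetical callable): a custom profile can be supplied
+ # instead of the built-in shapes via the config, e.g.
+ #   config.target_function = TargetFunction.CUSTOM
+ #   config.custom_target_fn = lambda x: np.exp(-np.abs(x - config.x0) / config.sigma)
+ # The result is normalized in _compute_target_wavefunction, so the callable only
+ # needs to return an unnormalized real or complex profile over x.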
180
+ def get_statevector(self, params: ParameterArray, backend: str = "auto") -> ComplexArray:
181
+ """
182
+ Get statevector with automatic backend selection.
183
+
184
+ Args:
185
+ params: Circuit parameters
186
+ backend: 'auto', 'custatevec', 'gpu', or 'cpu'
187
+
188
+ Returns:
189
+ Statevector as numpy array
190
+ """
191
+ if backend == "auto":
192
+ if self.config.use_custatevec and self._custatevec_evaluator is not None:
193
+ backend = "custatevec"
194
+ elif (
195
+ self.config.use_gpu
196
+ and self._gpu_evaluator is not None
197
+ and self._gpu_evaluator.gpu_available
198
+ ):
199
+ backend = "gpu"
200
+ else:
201
+ backend = "cpu"
202
+
203
+ if backend == "custatevec":
204
+ return self._custatevec_evaluator.get_statevector(params)
205
+ elif backend == "gpu":
206
+ return self._gpu_evaluator.get_statevector(params)
207
+ else:
208
+ # CPU path using Qiskit
209
+ bound_circuit = self._circuit_transpiled.assign_parameters(
210
+ dict(zip(self._param_list, params))
211
+ )
212
+ return Statevector(bound_circuit).data
213
+
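+ # Illustrative usage sketch (assumes an existing instance named `optimizer`):
+ #   psi = optimizer.get_statevector(np.zeros(optimizer.n_params), backend="cpu")
+ #   # psi is a normalized statevector: np.linalg.norm(psi) ≈ 1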
214
+ def compute_fidelity(
215
+ self,
216
+ params: Optional[ParameterArray] = None,
217
+ psi: Optional[ComplexArray] = None,
218
+ backend: str = "auto",
219
+ ) -> float:
220
+ """
221
+ Compute fidelity with automatic backend selection.
222
+
223
+ Args:
224
+ params: Circuit parameters (provide this OR psi)
225
+ psi: Pre-computed statevector (provide this OR params)
226
+ backend: 'auto', 'custatevec', 'gpu', or 'cpu'
227
+
228
+ Returns:
229
+ Fidelity value
230
+ """
231
+ if psi is not None:
232
+ # Direct computation from provided statevector
233
+ return self._compute_fidelity_fast(psi)
234
+
235
+ if params is None:
236
+ raise ValueError("Must provide either params or psi")
237
+
238
+ if backend == "auto":
239
+ if self.config.use_custatevec and self._custatevec_evaluator is not None:
240
+ backend = "custatevec"
241
+ elif (
242
+ self.config.use_gpu
243
+ and self._gpu_evaluator is not None
244
+ and self._gpu_evaluator.gpu_available
245
+ ):
246
+ backend = "gpu"
247
+ else:
248
+ backend = "cpu"
249
+
250
+ if backend == "custatevec":
251
+ return self._custatevec_evaluator.compute_fidelity(params)
252
+ elif backend == "gpu":
253
+ return self._gpu_evaluator.compute_fidelity(params)
254
+ else:
255
+ psi = self.get_statevector(params, backend="cpu")
256
+ return self._compute_fidelity_fast(psi)
257
+
258
+ def evaluate_population(
259
+ self, population: "NDArray[np.float64]", backend: str = "auto"
260
+ ) -> "FloatArray":
261
+ """
262
+ Evaluate fidelities for population with automatic backend selection.
263
+
264
+ Args:
265
+ population: Array of shape (pop_size, n_params)
266
+ backend: 'auto', 'multi_gpu', 'custatevec', 'gpu', or 'cpu'
267
+
268
+ Returns:
269
+ Array of fidelities
270
+ """
271
+ pop_size = len(population)
272
+
273
+ if backend == "auto":
274
+ # Priority: Multi-GPU > cuStateVec > GPU > CPU
275
+ if self.config.use_multi_gpu and self._multi_gpu_evaluator is not None:
276
+ backend = "multi_gpu"
277
+ elif self.config.use_custatevec and self._custatevec_batch_evaluator is not None:
278
+ backend = "custatevec"
279
+ elif (
280
+ self.config.use_gpu
281
+ and self._gpu_evaluator is not None
282
+ and self._gpu_evaluator.gpu_available
283
+ ):
284
+ backend = "gpu"
285
+ else:
286
+ backend = "cpu"
287
+
288
+ if backend == "multi_gpu":
289
+ # Use parallel multi-GPU evaluation
290
+ fidelities = self._multi_gpu_evaluator.evaluate_batch_parallel(population)
291
+ elif backend == "custatevec":
292
+ fidelities = self._custatevec_batch_evaluator.evaluate_batch_chunked(population)
293
+ elif backend == "gpu":
294
+ batch_size = self.config.gpu_batch_size
295
+ fidelities = np.zeros(pop_size)
296
+ for start in range(0, pop_size, batch_size):
297
+ end = min(start + batch_size, pop_size)
298
+ fidelities[start:end] = self._gpu_evaluator.compute_fidelities_batched(
299
+ population[start:end]
300
+ )
301
+ else:
302
+ # CPU path
303
+ if self.config.parallel_gradients and self.config.n_workers > 1:
304
+ fidelities = self._evaluate_population_parallel_cpu(population)
305
+ else:
306
+ fidelities = np.array(
307
+ [
308
+ self._compute_fidelity_fast(self.get_statevector(p, backend="cpu"))
309
+ for p in population
310
+ ]
311
+ )
312
+
313
+ # Update tracking
314
+ self.n_evals += pop_size
315
+ best_idx = np.argmax(fidelities)
316
+ if fidelities[best_idx] > self.best_fidelity:
317
+ self.best_fidelity = fidelities[best_idx]
318
+ self.best_params = population[best_idx].copy()
319
+
320
+ return fidelities
321
+
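+ # Illustrative usage sketch (hypothetical sizes; assumes an instance named `optimizer`):
+ #   pop = np.random.uniform(-np.pi, np.pi, size=(32, optimizer.n_params))
+ #   fids = optimizer.evaluate_population(pop, backend="auto")  # shape (32,)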
322
+ def _evaluate_population_parallel_cpu(self, population: np.ndarray) -> np.ndarray:
323
+ """CPU parallel population evaluation helper."""
324
+ pop_size = len(population)
325
+ chunk_size = max(1, pop_size // (self.config.n_workers * 2))
326
+
327
+ if not hasattr(self, "_parallel_evaluator"):
328
+ self._parallel_evaluator = ThreadSafeCircuitEvaluator(self.config, self.target)
329
+
330
+ def evaluate_chunk(indices: list[int]) -> list[tuple[int, float]]:
331
+ results = []
332
+ for idx in indices:
333
+ fid = self._parallel_evaluator.compute_fidelity(population[idx])
334
+ results.append((idx, fid))
335
+ return results
336
+
337
+ indices = list(range(pop_size))
338
+ chunks = [indices[i : i + chunk_size] for i in range(0, len(indices), chunk_size)]
339
+
340
+ fidelities = np.zeros(pop_size)
341
+
342
+ with ThreadPoolExecutor(max_workers=self.config.n_workers) as executor:
343
+ chunk_results = list(executor.map(evaluate_chunk, chunks))
344
+
345
+ for chunk_result in chunk_results:
346
+ for idx, fid in chunk_result:
347
+ fidelities[idx] = fid
348
+
349
+ return fidelities
350
+
351
+ def _compute_fidelity_fast(self, psi_circuit: ComplexArray) -> float:
352
+ """Optimized fidelity using pre-conjugated target"""
353
+ # Single dot product with pre-conjugated target
354
+ overlap = np.dot(self._target_conj, psi_circuit)
355
+ return overlap.real**2 + overlap.imag**2
356
+
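+ # Equivalent (slower) reference formulation: abs(np.vdot(self.target, psi_circuit)) ** 2
+ # gives the same value, since np.vdot conjugates its first argument.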
357
+ def compute_gradient(self, params: np.ndarray, method: str = "auto") -> np.ndarray:
358
+ """
359
+ Unified gradient computation with automatic backend selection.
360
+
361
+ Args:
362
+ params: Current parameters
363
+ method: 'auto', 'custatevec', 'gpu', 'parallel', or 'sequential'
364
+
365
+ Returns:
366
+ Gradient array (for minimizing -fidelity)
367
+ """
368
+ if method == "auto":
369
+ # Priority: Multi-GPU > cuStateVec > GPU > Parallel CPU > Sequential CPU
370
+ if self.config.use_multi_gpu and self._multi_gpu_evaluator is not None:
371
+ method = "multi_gpu"
372
+ elif self.config.use_custatevec and self._custatevec_batch_evaluator is not None:
373
+ method = "custatevec"
374
+ elif (
375
+ self.config.use_gpu
376
+ and self._gpu_evaluator is not None
377
+ and self._gpu_evaluator.gpu_available
378
+ ):
379
+ method = "gpu"
380
+ elif self.config.parallel_gradients and self.config.n_workers > 1:
381
+ method = "parallel"
382
+ else:
383
+ method = "sequential"
384
+
385
+ if method == "multi_gpu":
386
+ return self._multi_gpu_evaluator.compute_gradient_parallel(params)
387
+ elif method == "custatevec":
388
+ return self._compute_gradient_custatevec_impl(params)
389
+ elif method == "gpu":
390
+ return self._compute_gradient_gpu_impl(params)
391
+ elif method == "parallel":
392
+ return self._compute_gradient_parallel_impl(params)
393
+ else:
394
+ return self._compute_gradient_sequential_impl(params)
395
+
396
+ def _compute_gradient_sequential_impl(self, params: np.ndarray) -> np.ndarray:
397
+ """
398
+ Compute gradient analytically via parameter-shift rule.
399
+ For RY gates: ∂f/∂θ = (f(θ+π/2) - f(θ-π/2)) / 2
400
+
401
+ This replaces finite-difference gradients (n+1 evals) with
402
+ exact gradients (2n evals, but more accurate).
403
+ """
404
+ gradient = np.zeros(self.n_params)
405
+ shift = np.pi / 2
406
+
407
+ for i in range(self.n_params):
408
+ # Forward shift: θ_i + π/2
409
+ params_plus = params.copy()
410
+ params_plus[i] += shift
411
+ psi_plus = self.get_statevector(params_plus)
412
+ fid_plus = self._compute_fidelity_fast(psi_plus)
413
+
414
+ # Backward shift: θ_i - π/2
415
+ params_minus = params.copy()
416
+ params_minus[i] -= shift
417
+ psi_minus = self.get_statevector(params_minus)
418
+ fid_minus = self._compute_fidelity_fast(psi_minus)
419
+
420
+ # Parameter-shift gradient formula
421
+ gradient[i] = (fid_plus - fid_minus) / 2
422
+
423
+ # Return negative gradient (we minimize -fidelity)
424
+ return -gradient
425
+
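+ # Worked check of the shift rule on a single RY qubit (illustrative): with
+ # F(θ) = sin²(θ/2) = (1 - cos θ)/2 the exact derivative is dF/dθ = (sin θ)/2, and
+ # [F(θ+π/2) - F(θ-π/2)]/2 = [cos(θ-π/2) - cos(θ+π/2)]/4 = (sin θ)/2, so the two
+ # shifted evaluations reproduce the derivative exactly (no finite-difference error).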
426
+ def compute_gradient_parallel(self, params: np.ndarray) -> np.ndarray:
427
+ return self.compute_gradient(params, method="parallel")
428
+
429
+ def _compute_gradient_parallel_impl(self, params: np.ndarray) -> np.ndarray:
430
+ """
431
+ Chunked parallel gradient computation for better load balancing.
432
+
433
+ Groups parameters into chunks to reduce thread overhead.
434
+ More efficient when n_params >> n_workers.
435
+ """
436
+ if not self.config.parallel_gradients or self.config.n_workers <= 1:
437
+ return self._compute_gradient_sequential_impl(params)
438
+
439
+ shift = np.pi / 2
440
+ n_workers = self.config.n_workers
441
+ chunk_size = self.config.gradient_chunk_size
442
+
443
+ # Create thread-safe evaluator if not exists
444
+ if not hasattr(self, "_parallel_evaluator"):
445
+ self._parallel_evaluator = ThreadSafeCircuitEvaluator(self.config, self.target)
446
+
447
+ def evaluate_chunk(param_indices: list[int]) -> list[tuple[int, float]]:
448
+ """Evaluate gradient for a chunk of parameters"""
449
+ results = []
450
+ for idx in param_indices:
451
+ # Forward shift
452
+ params_plus = params.copy()
453
+ params_plus[idx] += shift
454
+ fid_plus = self._parallel_evaluator.compute_fidelity(params_plus)
455
+
456
+ # Backward shift
457
+ params_minus = params.copy()
458
+ params_minus[idx] -= shift
459
+ fid_minus = self._parallel_evaluator.compute_fidelity(params_minus)
460
+
461
+ grad_i = (fid_plus - fid_minus) / 2
462
+ results.append((idx, grad_i))
463
+
464
+ return results
465
+
466
+ # Create chunks
467
+ indices = list(range(self.n_params))
468
+ chunks = [indices[i : i + chunk_size] for i in range(0, len(indices), chunk_size)]
469
+
470
+ # Parallel execution
471
+ gradient = np.zeros(self.n_params)
472
+
473
+ with ThreadPoolExecutor(max_workers=n_workers) as executor:
474
+ chunk_results = list(executor.map(evaluate_chunk, chunks))
475
+
476
+ # Collect results
477
+ for chunk_result in chunk_results:
478
+ for idx, grad_val in chunk_result:
479
+ gradient[idx] = grad_val
480
+
481
+ return -gradient
482
+
483
+ def _compute_gradient_custatevec_impl(self, params: np.ndarray) -> np.ndarray:
484
+ """
485
+ Compute gradient using cuStateVec batched evaluation.
486
+
487
+ This is the fastest gradient computation method.
488
+ """
489
+ if self._custatevec_batch_evaluator is not None:
490
+ return self._custatevec_batch_evaluator.compute_gradient_batched(params)
491
+ elif self._gpu_evaluator is not None and self._gpu_evaluator.gpu_available:
492
+ return self._compute_gradient_gpu_impl(params)
493
+ else:
494
+ return self._compute_gradient_sequential_impl(params)
495
+
496
+ def run_optimization(
497
+ self, pipeline: OptimizationPipeline = None, initial_params: np.ndarray = None, **kwargs
498
+ ) -> dict:
499
+ """
500
+ Unified optimization entry point.
501
+
502
+ Replaces: optimize(), optimize_ultra_precision(), optimize_hybrid()
503
+
504
+ Args:
505
+ pipeline: OptimizationPipeline config (or use kwargs for quick setup)
506
+ initial_params: Starting parameters (None = auto-select)
507
+ **kwargs: Override pipeline settings
508
+
509
+ Returns:
510
+ Results dictionary with optimal_params, fidelity, etc.
511
+ """
512
+ # Build pipeline config
513
+ if pipeline is None:
514
+ pipeline = OptimizationPipeline(**kwargs)
515
+ else:
516
+ # Apply any kwargs overrides
517
+ for key, value in kwargs.items():
518
+ if hasattr(pipeline, key):
519
+ setattr(pipeline, key, value)
520
+
521
+ start_time = time.time()
522
+
523
+ if pipeline.verbose:
524
+ print(f"\n{'=' * 80}")
525
+ print(f"OPTIMIZATION PIPELINE: {pipeline.mode.upper()}")
526
+ print(f"{'=' * 80}")
527
+ print(f" Target fidelity: {pipeline.target_fidelity}")
528
+ print(f" Target infidelity: {pipeline.target_infidelity:.0e}")
529
+ print(f" Max time: {pipeline.max_total_time:.0f}s")
530
+
531
+ current_params = initial_params
532
+
533
+ # === STAGE: Initialization Search ===
534
+ if pipeline.use_init_search:
535
+ current_params = self._pipeline_init_search(pipeline, current_params, start_time)
536
+ elif current_params is None:
537
+ current_params = self.get_initial_params("smart")
538
+
539
+ # === STAGE: Adam Exploration ===
540
+ if pipeline.use_adam_stage:
541
+ elapsed = time.time() - start_time
542
+ time_limit = pipeline.max_total_time * pipeline.adam_time_fraction
543
+ if self.best_fidelity < pipeline.target_fidelity and elapsed < time_limit:
544
+ current_params = self._pipeline_adam_stage(pipeline, current_params, start_time)
545
+
546
+ # === STAGE: Basin Hopping (if stuck) ===
547
+ if pipeline.use_basin_hopping:
548
+ elapsed = time.time() - start_time
549
+ if (
550
+ self.best_fidelity < pipeline.basin_hopping_threshold
551
+ and elapsed < pipeline.max_total_time * 0.8
552
+ ):
553
+ current_params = self._pipeline_basin_hopping(pipeline, current_params, start_time)
554
+
555
+ # === STAGE: L-BFGS-B Refinement ===
556
+ if pipeline.use_lbfgs_refinement:
557
+ elapsed = time.time() - start_time
558
+ time_limit = pipeline.max_total_time * pipeline.lbfgs_time_fraction
559
+ if self.best_fidelity < pipeline.target_fidelity and elapsed < time_limit:
560
+ current_params = self._pipeline_lbfgs_refinement(
561
+ pipeline, current_params, start_time
562
+ )
563
+
564
+ # === STAGE: Fine Tuning ===
565
+ if pipeline.use_fine_tuning:
566
+ elapsed = time.time() - start_time
567
+ if (
568
+ self.best_fidelity > pipeline.fine_tuning_threshold
569
+ and self.best_fidelity < pipeline.target_fidelity
570
+ and elapsed < pipeline.max_total_time
571
+ ):
572
+ current_params = self._pipeline_fine_tuning(pipeline, current_params, start_time)
573
+
574
+ # === Final Results ===
575
+ return self._pipeline_finalize(pipeline, start_time)
576
+
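+ # Illustrative usage sketch (hypothetical settings; kwargs are forwarded to
+ # OptimizationPipeline):
+ #   results = optimizer.run_optimization(mode="hybrid", target_fidelity=0.9999)
+ #   print(results["fidelity"], results["infidelity"], results["time"])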
577
+ def _pipeline_init_search(
578
+ self, pipeline: OptimizationPipeline, initial_params: np.ndarray, _start_time: float
579
+ ) -> np.ndarray:
580
+ """Pipeline stage: initialization search."""
581
+ if pipeline.verbose:
582
+ print(f"\n{'=' * 60}")
583
+ print("STAGE: Initialization Search")
584
+ print("=" * 60)
585
+
586
+ best_init_fid = 0
587
+ best_init_params = initial_params
588
+ best_init_strategy = None
589
+
590
+ for i, strategy in enumerate(pipeline.init_strategies):
591
+ np.random.seed(42 + i)
592
+ params = self.get_initial_params(strategy)
593
+
594
+ # Use fastest available evaluator
595
+ if self.config.use_custatevec and self._custatevec_evaluator is not None:
596
+ fid = self._custatevec_evaluator.compute_fidelity(params)
597
+ elif self._gpu_evaluator is not None and self._gpu_evaluator.gpu_available:
598
+ fid = self._gpu_evaluator.compute_fidelity(params)
599
+ else:
600
+ psi = self.get_statevector(params)
601
+ fid = self._compute_fidelity_fast(psi)
602
+
603
+ if pipeline.verbose:
604
+ print(f" {strategy:20s}: F = {fid:.8f}")
605
+
606
+ if fid > best_init_fid:
607
+ best_init_fid = fid
608
+ best_init_params = params.copy()
609
+ best_init_strategy = strategy
610
+
611
+ if pipeline.verbose:
612
+ print(f"\nBest initialization: '{best_init_strategy}' with F = {best_init_fid:.8f}")
613
+
614
+ return best_init_params if initial_params is None else initial_params
615
+
616
+ def _pipeline_adam_stage(
617
+ self, pipeline: OptimizationPipeline, current_params: np.ndarray, start_time: float
618
+ ) -> np.ndarray:
619
+ """Pipeline stage: Adam exploration."""
620
+ if pipeline.verbose:
621
+ print(f"\n{'=' * 60}")
622
+ print("STAGE: Adam Exploration")
623
+ print("=" * 60)
624
+
625
+ # Use explicit max_time if provided, otherwise calculate from fraction
626
+ if pipeline.adam_max_time is not None:
627
+ adam_time_budget = pipeline.adam_max_time
628
+ else:
629
+ elapsed = time.time() - start_time
630
+ adam_time_budget = pipeline.max_total_time * pipeline.adam_time_fraction - elapsed
631
+ adam_time_budget = max(1.0, adam_time_budget)
632
+
633
+ if pipeline.verbose:
634
+ print(f" Time budget: {adam_time_budget:.0f}s")
635
+
636
+ self.optimize_adam(
637
+ current_params,
638
+ max_steps=pipeline.adam_max_steps,
639
+ lr=pipeline.adam_lr,
640
+ max_time=adam_time_budget,
641
+ )
642
+
643
+ if pipeline.verbose:
644
+ print(f"\nAfter Adam: F = {self.best_fidelity:.12f}")
645
+ print(f" Infidelity: {1 - self.best_fidelity:.3e}")
646
+
647
+ return self.best_params
648
+
649
+ def _pipeline_basin_hopping(
650
+ self, pipeline: OptimizationPipeline, _current_params: np.ndarray, _start_time: float
651
+ ) -> np.ndarray:
652
+ """Pipeline stage: Basin hopping for escaping local minima."""
653
+ if pipeline.verbose:
654
+ print(f"\n{'=' * 60}")
655
+ print("STAGE: Basin Hopping (escaping local minimum)")
656
+ print("=" * 60)
657
+
658
+ self.optimize_basin_hopping(
659
+ self.best_params,
660
+ n_iterations=pipeline.basin_hopping_iterations,
661
+ temperature=0.5,
662
+ local_optimizer="lbfgs",
663
+ )
664
+
665
+ return self.best_params
666
+
667
+ def _pipeline_lbfgs_refinement(
668
+ self, pipeline: OptimizationPipeline, current_params: np.ndarray, start_time: float
669
+ ) -> np.ndarray:
670
+ """Pipeline stage: L-BFGS-B high-precision refinement."""
671
+ if pipeline.verbose:
672
+ print(f"\n{'=' * 60}")
673
+ print("STAGE: L-BFGS-B High-Precision Refinement")
674
+ print("=" * 60)
675
+
676
+ if self.best_params is None or len(self.best_params) != self.n_params:
677
+ self.best_params = current_params
678
+
679
+ for tol in pipeline.lbfgs_tolerances:
680
+ if self.best_fidelity >= pipeline.target_fidelity:
681
+ break
682
+
683
+ elapsed = time.time() - start_time
684
+ if elapsed > pipeline.max_total_time * 0.9:
685
+ break
686
+
687
+ if pipeline.verbose:
688
+ print(f"\n Refinement pass (tol={tol:.0e})...")
689
+
690
+ self.config.gtol = tol
691
+ self.optimize_stage(
692
+ self.best_params, f"Refinement (tol={tol:.0e})", max_iter=3000, tolerance=tol
693
+ )
694
+
695
+ if pipeline.verbose:
696
+ print(f" F = {self.best_fidelity:.15f}")
697
+ print(f" Infidelity = {1 - self.best_fidelity:.3e}")
698
+
699
+ return self.best_params
700
+
701
+ def _pipeline_fine_tuning(
702
+ self, pipeline: OptimizationPipeline, _current_params: np.ndarray, _start_time: float
703
+ ) -> np.ndarray:
704
+ """Pipeline stage: Ultra-fine tuning for near-target fidelities."""
705
+ if pipeline.verbose:
706
+ print(f"\n{'=' * 60}")
707
+ print("STAGE: Ultra-Fine Tuning")
708
+ print("=" * 60)
709
+
710
+ # Small Adam steps
711
+ self.optimize_adam(
712
+ self.best_params,
713
+ max_steps=1000,
714
+ lr=0.0001,
715
+ convergence_threshold=pipeline.target_infidelity / 10,
716
+ )
717
+
718
+ # Final polish
719
+ self.config.gtol = 1e-15
720
+ self.optimize_stage(self.best_params, "Final Polish", max_iter=5000, tolerance=1e-15)
721
+
722
+ return self.best_params
723
+
724
+ def _pipeline_finalize(self, pipeline: OptimizationPipeline, start_time: float) -> dict:
725
+ """Finalize pipeline and return results."""
726
+ total_time = time.time() - start_time
727
+
728
+ # Get statevector for plotting - must match target ordering
729
+ if self.config.use_custatevec and self._custatevec_evaluator is not None:
730
+ final_psi = self._custatevec_evaluator.get_statevector_qiskit_order(self.best_params)
731
+ final_fidelity = self._custatevec_evaluator.compute_fidelity(self.best_params)
732
+ elif (
733
+ self.config.use_gpu
734
+ and self._gpu_evaluator is not None
735
+ and self._gpu_evaluator.gpu_available
736
+ ):
737
+ final_psi = self._gpu_evaluator.get_statevector(self.best_params)
738
+ final_fidelity = self._gpu_evaluator.compute_fidelity(self.best_params)
739
+ else:
740
+ final_psi = self.get_statevector(self.best_params)
741
+ final_fidelity = self._compute_fidelity_fast(final_psi)
742
+
743
+ circuit_stats = self.compute_statistics(final_psi)
744
+
745
+ results = {
746
+ "optimal_params": self.best_params,
747
+ "fidelity": final_fidelity,
748
+ "infidelity": 1 - final_fidelity,
749
+ "circuit_mean": circuit_stats["mean"],
750
+ "circuit_std": circuit_stats["std"],
751
+ "target_mean": self.config.x0,
752
+ "target_std": self.config.sigma,
753
+ "mean_error": abs(circuit_stats["mean"] - self.config.x0),
754
+ "std_error": abs(circuit_stats["std"] - self.config.sigma),
755
+ "relative_std_error": abs(circuit_stats["std"] - self.config.sigma) / self.config.sigma,
756
+ "time": total_time,
757
+ "n_evaluations": self.n_evals,
758
+ "success": final_fidelity >= pipeline.target_fidelity,
759
+ "final_statevector": final_psi,
760
+ "circuit_stats": circuit_stats,
761
+ }
762
+
763
+ if pipeline.verbose:
764
+ print(f"\n{'=' * 80}")
765
+ print("OPTIMIZATION COMPLETE")
766
+ print(f"{'=' * 80}")
767
+ print(f"Final fidelity: {final_fidelity:.15f}")
768
+ print(f"Infidelity: {1 - final_fidelity:.3e}")
769
+ print(f"Target infidelity: {pipeline.target_infidelity:.3e}")
770
+ print(f"Success: {' ' if results['success'] else ''}")
771
+ print(f"Circuit σ: {circuit_stats['std']:.10f}")
772
+ print(f"Target σ: {self.config.sigma:.10f}")
773
+ print(f"σ relative error: {results['relative_std_error'] * 100:.6f}%")
774
+ print(f"Total time: {total_time:.1f}s")
775
+ print(f"Total evaluations: {self.n_evals}")
776
+
777
+ return results
778
+
779
+ def objective_and_gradient(self, params: np.ndarray) -> tuple:
780
+ """
781
+ Combined objective and gradient computation for scipy.
782
+ Using jac=True in minimize() avoids redundant evaluations.
783
+ """
784
+ # Compute objective (also updates tracking)
785
+ obj = self.objective(params)
786
+ grad = self.compute_gradient(params)
787
+
788
+ # Store for diagnostics
789
+ self.last_gradient = grad
790
+ grad_norm = np.linalg.norm(grad)
791
+ if self.n_evals % self._log_interval == 0:
792
+ self.history["gradient_norm"].append(grad_norm)
793
+
794
+ return obj, grad
795
+
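+ # This pairs with scipy.optimize.minimize(..., jac=True): scipy calls this single
+ # function and unpacks (objective, gradient), avoiding a second evaluation per
+ # iteration (see optimize_stage below).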
796
+ def objective(self, params: np.ndarray) -> float:
797
+ """Objective function for minimization"""
798
+ self.n_evals += 1
799
+
800
+ # Get circuit output
801
+ psi_circuit = self.get_statevector(params)
802
+
803
+ # Compute fidelity with high precision
804
+ fidelity = self._compute_fidelity_fast(psi_circuit)
805
+
806
+ # Track best with high precision comparison
807
+ if fidelity > self.best_fidelity:
808
+ self.best_fidelity = fidelity
809
+ self.best_params = params.copy()
810
+
811
+ if self.n_evals % 10 == 0: # Only store every 10th evaluation
812
+ self.history["fidelity"].append(fidelity)
813
+ self.history["iteration"].append(self.n_evals)
814
+
815
+ # Progress updates - show more precision
816
+ if self.config.verbose and self.n_evals % self._log_interval == 0:
817
+ current_time = time.time()
818
+ if current_time - self._last_log_time >= self._min_log_interval_sec:
819
+ print(f"Eval {self.n_evals:6d}: F={fidelity:.10f} (best={self.best_fidelity:.10f})")
820
+ self._last_log_time = current_time
821
825
+
826
+ # Return negative for minimization
827
+ return -fidelity
828
+
829
+ def get_initial_params(self, strategy="smart", scale_factor=1.0):
830
+ """
831
+ Generate initial parameters with physics-informed strategies.
832
+
833
+ Strategies:
834
+ - 'smart': Physics-informed initialization based on target Gaussian
835
+ - 'gaussian_product': Approximate Gaussian as product state
836
+ - 'random': Uniform random in [-π, π]
837
+ - 'small_random': Small perturbations (for refinement)
838
+ - 'zero': All zeros
839
+ """
840
+ n = self.config.n_qubits
841
+ params = np.zeros(self.n_params)
842
+
843
+ if strategy == "smart":
844
+ # Physics-informed initialization
845
+ # Key insight: For Gaussian, we want smooth amplitude distribution
846
+
847
+ # Compute effective width in grid units
848
+ sigma_grid = self.config.sigma / self.config.delta_x
849
+ width_ratio = sigma_grid / self.config.n_states
850
+
851
+ # First layer: set up approximate Gaussian envelope
852
+ # Smaller angles for narrower Gaussians (less superposition needed)
853
+ base_angle = np.pi * min(0.3, width_ratio * 2) * scale_factor
854
+
855
+ for i in range(n):
856
+ # Higher-order qubits (larger 2^i) need smaller rotations
857
+ # for narrow Gaussians to avoid high-frequency components
858
+ bit_weight = 2**i / self.config.n_states
859
+ damping = np.exp(-bit_weight / (4 * width_ratio + 0.1))
860
+ params[i] = base_angle * damping * (1 + 0.1 * np.random.randn())
861
+
862
+ # Subsequent layers: entangling layers need small initial values
863
+ # to allow optimization to find correlations
864
+ remaining_params = self.n_params - n
865
+ if remaining_params > 0:
866
+ params[n:] = 0.1 * scale_factor * np.random.randn(remaining_params)
867
+
868
+ elif strategy == "gaussian_product":
869
+ # Approximate Gaussian as product state (no entanglement initially)
870
+ # Good starting point that optimization can refine
871
+
872
+ # For each computational basis state |x⟩, we want amplitude ~ exp(-x²/2σ²)
873
+ # With product state, amplitude of |x⟩ = ∏ᵢ amplitude of qubit i
874
+
875
+ sigma_grid = self.config.sigma / self.config.delta_x
876
+
877
+ for i in range(n):
878
+ # Contribution of qubit i to position
879
+ pos_contribution = 2**i - self.config.n_states / 2
880
+
881
+ # Desired probability for this qubit being |1⟩
882
+ # Based on Gaussian weight at this position contribution
883
+ gauss_weight = np.exp(-(pos_contribution**2) / (2 * sigma_grid**2 * n))
884
+
885
+ # RY(θ)|0⟩ = cos(θ/2)|0⟩ + sin(θ/2)|1⟩
886
+ # P(|1⟩) = sin²(θ/2), so θ = 2*arcsin(sqrt(p))
887
+ prob_one = np.clip(gauss_weight, 0.01, 0.99)
888
+ params[i] = 2 * np.arcsin(np.sqrt(prob_one)) * scale_factor
889
+
890
+ # Small random for entangling layers
891
+ params[n:] = 0.05 * scale_factor * np.random.randn(self.n_params - n)
892
+
893
+ elif strategy == "random":
894
+ params = np.random.uniform(-np.pi, np.pi, self.n_params)
895
+
896
+ elif strategy == "small_random":
897
+ # Small perturbations for refinement from current best
898
+ params = scale_factor * np.random.randn(self.n_params)
899
+
900
+ elif strategy == "perturb_best":
901
+ # Perturb from current best (if available)
902
+ if self.best_params is not None:
903
+ params = self.best_params + scale_factor * 0.1 * np.random.randn(self.n_params)
904
+ else:
905
+ params = self.get_initial_params("smart", scale_factor)
906
+
907
+ else: # 'zero'
908
+ pass # Already zeros
909
+
910
+ return params
911
+
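+ # Quick check of the angle formula used in 'gaussian_product' (illustrative):
+ # RY(θ)|0⟩ gives P(|1⟩) = sin²(θ/2), so θ = 2·arcsin(√p); p = 0.5 yields θ = π/2,
+ # i.e. an equal superposition on that qubit.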
912
+ def cleanup(self) -> None:
913
+ """Release all GPU resources."""
914
+ if hasattr(self, "_multi_gpu_evaluator") and self._multi_gpu_evaluator is not None:
915
+ self._multi_gpu_evaluator.cleanup()
916
+ self._multi_gpu_evaluator = None
917
+
918
+ if hasattr(self, "_custatevec_evaluator") and self._custatevec_evaluator is not None:
919
+ self._custatevec_evaluator.cleanup()
920
+ self._custatevec_evaluator = None
921
+
922
+ if (
923
+ hasattr(self, "_custatevec_batch_evaluator")
924
+ and self._custatevec_batch_evaluator is not None
925
+ ):
926
+ self._custatevec_batch_evaluator.cleanup()
927
+ self._custatevec_batch_evaluator = None
928
+
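+ # Illustrative usage sketch: release GPU resources when finished, e.g.
+ #   try:
+ #       results = optimizer.run_optimization(mode="ultra")
+ #   finally:
+ #       optimizer.cleanup()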
929
+ def compute_statistics(self, psi: np.ndarray) -> dict:
930
+ """Compute wavefunction statistics with high precision"""
931
+ x = self.positions
932
+ dx = self.config.delta_x
933
+
934
+ # Probability density with high precision
935
+ prob = np.abs(psi) ** 2
936
+ prob_sum = np.sum(prob) * dx
937
+ prob = prob / prob_sum
938
+
939
+ # Moments with high precision
940
+ mean_x = np.sum(x * prob) * dx
941
+ variance = np.sum((x - mean_x) ** 2 * prob) * dx
942
+ std_x = np.sqrt(max(variance, 0)) # Ensure non-negative
943
+
944
+ return {"mean": mean_x, "std": std_x, "variance": variance}
945
+
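+ # Illustrative check (assumes an instance named `optimizer` with a Gaussian target):
+ # applied to the target state itself, the moments reproduce the configured Gaussian
+ # up to grid discretization, e.g.
+ #   stats = optimizer.compute_statistics(optimizer.target)
+ #   # stats["mean"] ≈ config.x0, stats["std"] ≈ config.sigma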
946
+ def _compute_gradient_gpu_impl(self, params: np.ndarray) -> np.ndarray:
947
+ """
948
+ Compute gradient using batched GPU evaluation.
949
+
950
+ Instead of 2*n_params individual calls, we make:
951
+ - 1 call with 2*n_params circuits (all shifts at once)
952
+
953
+ This is much faster on GPU due to parallelism.
954
+ """
955
+ if self._gpu_evaluator is None or not self._gpu_evaluator.gpu_available:
956
+ # Fall back to sequential analytic gradient
957
+ return self._compute_gradient_sequential_impl(params)
958
+
959
+ shift = np.pi / 2
960
+ n_params = self.n_params
961
+
962
+ # Build all shifted parameter sets at once
963
+ # Shape: (2 * n_params, n_params)
964
+ params_shifted = np.zeros((2 * n_params, n_params))
965
+
966
+ for i in range(n_params):
967
+ # Forward shift
968
+ params_shifted[2 * i] = params.copy()
969
+ params_shifted[2 * i, i] += shift
970
+
971
+ # Backward shift
972
+ params_shifted[2 * i + 1] = params.copy()
973
+ params_shifted[2 * i + 1, i] -= shift
974
+
975
+ # Single batched GPU call for all shifts
976
+ fidelities = self._gpu_evaluator.compute_fidelities_batched(params_shifted)
977
+
978
+ # Compute gradients from shift results
979
+ gradient = np.zeros(n_params)
980
+ for i in range(n_params):
981
+ fid_plus = fidelities[2 * i]
982
+ fid_minus = fidelities[2 * i + 1]
983
+ gradient[i] = (fid_plus - fid_minus) / 2
984
+
985
+ # Return negative gradient (we minimize -fidelity)
986
+ return -gradient
987
+
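+ # Layout reminder for the batched gradient above: row 2*i of params_shifted holds the
+ # +π/2 shift of parameter i and row 2*i+1 the -π/2 shift, so one batched fidelity call
+ # of size 2*n_params replaces 2*n_params separate simulator runs.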
988
+ def optimize_stage(
989
+ self, initial_params: np.ndarray, stage_name: str, max_iter: int, tolerance: float
990
+ ) -> dict:
991
+ """Run a single optimization stage"""
992
+ print(f"\n{stage_name}...")
993
+ print(f" Max iterations: {max_iter}")
994
+ print(f" Tolerance: {tolerance:.2e}")
995
+
996
+ if self.config.method == "differential_evolution":
997
+ bounds = [(-2 * np.pi, 2 * np.pi)] * self.n_params
998
+ result = differential_evolution(
999
+ self.objective,
1000
+ bounds,
1001
+ maxiter=max_iter // 15,
1002
+ tol=tolerance,
1003
+ disp=self.config.verbose,
1004
+ polish=True,
1005
+ workers=1,
1006
+ atol=tolerance / 10,
1007
+ )
1008
+ else:
1009
+ # High-precision optimization options
1010
+ options = {
1011
+ "maxiter": max_iter,
1012
+ "maxfun": self.config.max_fun,
1013
+ "ftol": tolerance,
1014
+ "gtol": self.config.gtol,
1015
+ "disp": self.config.verbose,
1016
+ }
1017
+
1018
+ # For very high precision, use tighter convergence
1019
+ if self.config.high_precision:
1020
+ options["maxcor"] = 30 # More corrections with exact gradients
1021
+ options["maxls"] = 40 # Line search steps
1022
+
1023
+ # Use analytic gradients if enabled
1024
+ if getattr(self.config, "use_analytic_gradients", True):
1025
+ result = minimize(
1026
+ self.objective_and_gradient,
1027
+ initial_params,
1028
+ method=self.config.method,
1029
+ jac=True, # We provide gradients
1030
+ bounds=[(-2 * np.pi, 2 * np.pi)] * self.n_params,
1031
+ options=options,
1032
+ )
1033
+ else:
1034
+ result = minimize(
1035
+ self.objective,
1036
+ initial_params,
1037
+ method=self.config.method,
1038
+ bounds=[(-2 * np.pi, 2 * np.pi)] * self.n_params,
1039
+ options=options,
1040
+ )
1041
+
1042
+ return result
1043
+
1044
+ def optimize_adam(
1045
+ self,
1046
+ initial_params: np.ndarray,
1047
+ max_steps: int = 2000,
1048
+ lr: float = 0.02,
1049
+ max_time: float = None,
1050
+ convergence_window: int = 100,
1051
+ convergence_threshold: float = 1e-8,
1052
+ verbose_interval: int = 100,
1053
+ ) -> dict:
1054
+ """
1055
+ Adam optimization with parameter-shift gradients.
1056
+
1057
+ Effective for escaping local minima and plateaus where
1058
+ L-BFGS-B gets stuck.
1059
+
1060
+ Args:
1061
+ initial_params: Starting parameters
1062
+ max_steps: Maximum Adam steps
1063
+ lr: Initial learning rate
1064
+ convergence_window: Steps to check for convergence
1065
+ convergence_threshold: Minimum improvement to continue
1066
+ verbose_interval: Print progress every N steps
1067
+
1068
+ Returns:
1069
+ Dictionary with optimization results
1070
+ """
1071
+ start_time = time.time()
1072
+ print(f"\nAdam Optimization (lr={lr}, max_steps={max_steps})")
1073
+ print("-" * 50)
1074
+
1075
+ params = initial_params.copy()
1076
+ optimizer = AdamWithRestarts(self.n_params, lr_max=lr, lr_min=lr / 50, restart_period=200)
1077
+
1078
+ # Tracking
1079
+ fidelity_history = []
1080
+ best_fidelity = 0
1081
+ best_params = params.copy()
1082
+
1083
+ start_time = time.time()
1084
+
1085
+ for step in range(max_steps):
1086
+ if max_time is not None and (time.time() - start_time) > max_time:
1087
+ print(f" Time limit reached at step {step}")
1088
+ break
1089
+ # Compute fidelity and gradient
1090
+ # Priority: cuStateVec > Aer GPU > CPU
1091
+ if self.config.use_custatevec and self._custatevec_evaluator is not None:
1092
+ fidelity = self._custatevec_evaluator.compute_fidelity(params)
1093
+ gradient = self._compute_gradient_custatevec_impl(params)
1094
+ elif (
1095
+ self.config.use_gpu
1096
+ and self._gpu_evaluator is not None
1097
+ and self._gpu_evaluator.gpu_available
1098
+ ):
1099
+ fidelity = self._gpu_evaluator.compute_fidelity(params)
1100
+ gradient = self._compute_gradient_gpu_impl(params)
1101
+ else:
1102
+ psi = self.get_statevector(params)
1103
+ fidelity = self._compute_fidelity_fast(psi)
1104
+ gradient = self._compute_gradient_sequential_impl(params)
1105
+
1106
+ # Track best
1107
+ if fidelity > best_fidelity:
1108
+ best_fidelity = fidelity
1109
+ best_params = params.copy()
1110
+
1111
+ fidelity_history.append(fidelity)
1112
+
1113
+ # Convergence check
1114
+ if len(fidelity_history) > convergence_window:
1115
+ recent_improvement = max(fidelity_history[-convergence_window:]) - min(
1116
+ fidelity_history[-convergence_window:]
1117
+ )
1118
+ if recent_improvement < convergence_threshold and fidelity > 0.99:
1119
+ print(
1120
+ f" Converged at step {step} (improvement {recent_improvement:.2e} < {convergence_threshold:.2e})"
1121
+ )
1122
+ break
1123
+
1124
+ # Progress logging
1125
+ if step % verbose_interval == 0:
1126
+ grad_norm = np.linalg.norm(gradient)
1127
+ current_lr = optimizer.get_lr()
1128
+ print(
1129
+ f" Step {step:5d}: F={fidelity:.10f}, |∇|={grad_norm:.2e}, lr={current_lr:.4f}"
1130
+ )
1131
+
1132
+ # Adam update (the gradient methods above already return the descent
1133
+ # direction for -fidelity, so it can be passed to Adam directly)
1134
+ params = optimizer.step(params, gradient)
1135
+
1136
+ # Keep parameters bounded
1137
+ params = np.clip(params, -2 * np.pi, 2 * np.pi)
1138
+
1139
+ self.n_evals += step
1140
+ elapsed = time.time() - start_time
1141
+
1142
+ # Update instance tracking
1143
+ if best_fidelity > self.best_fidelity:
1144
+ self.best_fidelity = best_fidelity
1145
+ self.best_params = best_params
1146
+
1147
+ print(f"\nAdam complete: F={best_fidelity:.12f} in {elapsed:.1f}s ({step + 1} steps)")
1148
+
1149
+ return {
1150
+ "params": best_params,
1151
+ "fidelity": best_fidelity,
1152
+ "history": fidelity_history,
1153
+ "steps": step + 1,
1154
+ "time": elapsed,
1155
+ }
1156
+
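+ # Illustrative usage sketch (hypothetical values):
+ #   init = optimizer.get_initial_params("smart")
+ #   adam_result = optimizer.optimize_adam(init, max_steps=500, lr=0.02, max_time=60.0)
+ #   print(adam_result["fidelity"], adam_result["steps"])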
1157
+ def optimize(self, initial_params: Optional[np.ndarray] = None) -> dict:
1158
+ """
1159
+ Multi-stage adaptive optimization.
1160
+
1161
+ DEPRECATED: Use run_optimization(mode='adaptive') instead.
1162
+ """
1163
+ pipeline = OptimizationPipeline(
1164
+ mode="adaptive",
1165
+ target_fidelity=getattr(self.config, "target_fidelity", 0.9999),
1166
+ use_basin_hopping=False,
1167
+ verbose=self.config.verbose,
1168
+ )
1169
+ return self.run_optimization(pipeline, initial_params)
1170
+
1171
+ def optimize_ultra_precision(
1172
+ self,
1173
+ target_infidelity: float = 1e-10,
1174
+ max_total_time: float = 3600,
1175
+ initial_params: np.ndarray = None,
1176
+ ) -> dict:
1177
+ """
1178
+ Ultra-high precision optimization pipeline.
1179
+
1180
+ DEPRECATED: Use run_optimization(mode='ultra', ...) instead.
1181
+ """
1182
+ pipeline = OptimizationPipeline(
1183
+ mode="ultra",
1184
+ target_fidelity=1 - target_infidelity,
1185
+ max_total_time=max_total_time,
1186
+ use_basin_hopping=True,
1187
+ basin_hopping_threshold=0.9999,
1188
+ use_fine_tuning=True,
1189
+ verbose=self.config.verbose,
1190
+ )
1191
+ return self.run_optimization(pipeline, initial_params)
1192
+
1193
+ def optimize_hybrid(
1194
+ self, initial_params: np.ndarray = None, adam_steps: int = 5000, _lbfgs_iter: int = 2000
1195
+ ) -> dict:
1196
+ """
1197
+ Hybrid Adam + L-BFGS-B optimization.
1198
+
1199
+ DEPRECATED: Use run_optimization(mode='hybrid', ...) instead.
1200
+ """
1201
+ pipeline = OptimizationPipeline(
1202
+ mode="hybrid",
1203
+ target_fidelity=getattr(self.config, "target_fidelity", 0.9999),
1204
+ use_init_search=True,
1205
+ use_adam_stage=True,
1206
+ adam_max_steps=adam_steps,
1207
+ use_basin_hopping=False,
1208
+ use_lbfgs_refinement=True,
1209
+ lbfgs_tolerances=[1e-10, 1e-12],
1210
+ use_fine_tuning=False,
1211
+ verbose=self.config.verbose,
1212
+ )
1213
+ return self.run_optimization(pipeline, initial_params)
1214
+
1215
+ def optimize_basin_hopping(
1216
+ self,
1217
+ initial_params: np.ndarray = None,
1218
+ n_iterations: int = 50,
1219
+ temperature: float = 1.0,
1220
+ step_size: float = 0.5,
1221
+ local_optimizer: str = "adam", # 'adam' or 'lbfgs'
1222
+ ) -> dict:
1223
+ """
1224
+ Basin hopping global optimization.
1225
+
1226
+ Combines random jumps with local optimization to explore
1227
+ multiple basins and find global minimum.
1228
+
1229
+ Effective for escaping deep local minima where Adam stalls.
1230
+ """
1231
+
1232
+ print(f"\n{'=' * 80}")
1233
+ print("BASIN HOPPING GLOBAL OPTIMIZATION")
1234
+ print(f"{'=' * 80}")
1235
+ print(f" Iterations: {n_iterations}")
1236
+ print(f" Temperature: {temperature}")
1237
+ print(f" Step size: {step_size}")
1238
+ print(f" Local optimizer: {local_optimizer}")
1239
+
1240
+ if initial_params is None:
1241
+ initial_params = self.get_initial_params("smart")
1242
+
1243
+ # Custom local minimizer using Adam
1244
+ def local_adam_minimizer(fun, x0, args=(), **kwargs):
1245
+ """Local minimizer using Adam for a fixed number of steps"""
1246
+ params = x0.copy()
1247
+ adam = AdamOptimizer(len(params), learning_rate=0.02)
1248
+
1249
+ best_f = fun(params)
1250
+ best_params = params.copy()
1251
+
1252
+ for _ in range(200): # Short Adam run
1253
+ # Numerical gradient (fast approximation for basin hopping)
1254
+ grad = np.zeros_like(params)
1255
+ eps = 1e-5
1256
+ f0 = fun(params)
1257
+ for i in range(len(params)):
1258
+ params[i] += eps
1259
+ grad[i] = (fun(params) - f0) / eps
1260
+ params[i] -= eps
1261
+
1262
+ params = adam.step(params, grad)
1263
+ params = np.clip(params, -2 * np.pi, 2 * np.pi)
1264
+
1265
+ f = fun(params)
1266
+ if f < best_f:
1267
+ best_f = f
1268
+ best_params = params.copy()
1269
+
1270
+ class Result:
1271
+ x = best_params
1272
+ fun = best_f
1273
+ success = True
1274
+
1275
+ return Result()
1276
+
1277
+ # Minimizer options
1278
+ if local_optimizer == "adam":
1279
+ minimizer_kwargs = {
1280
+ "method": local_adam_minimizer,
1281
+ }
1282
+ else:
1283
+ minimizer_kwargs = {
1284
+ "method": "L-BFGS-B",
1285
+ "bounds": [(-2 * np.pi, 2 * np.pi)] * self.n_params,
1286
+ "options": {"maxiter": 500, "ftol": 1e-10},
1287
+ }
1288
+
1289
+ # Callback to track progress
1290
+ best_fidelities = []
1291
+
1292
+ def callback(x, f, accept):
1293
+ fid = -f
1294
+ best_fidelities.append(fid)
1295
+ if len(best_fidelities) % 10 == 0:
1296
+ print(f" Iteration {len(best_fidelities)}: F={fid:.10f}, accepted={accept}")
1297
+
1298
+ start_time = time.time()
1299
+
1300
+ result = basinhopping(
1301
+ self.objective,
1302
+ initial_params,
1303
+ niter=n_iterations,
1304
+ T=temperature,
1305
+ stepsize=step_size,
1306
+ minimizer_kwargs=minimizer_kwargs,
1307
+ callback=callback,
1308
+ seed=42,
1309
+ )
1310
+
1311
+ elapsed = time.time() - start_time
1312
+
1313
+ # Update best
1314
+ final_fidelity = -result.fun
1315
+ if final_fidelity > self.best_fidelity:
1316
+ self.best_fidelity = final_fidelity
1317
+ self.best_params = result.x
1318
+
1319
+ print(f"\nBasin hopping complete: F={self.best_fidelity:.12f} in {elapsed:.1f}s")
1320
+
1321
+ return {
1322
+ "params": self.best_params,
1323
+ "fidelity": self.best_fidelity,
1324
+ "history": best_fidelities,
1325
+ "time": elapsed,
1326
+ }
1327
+
1328
+ def optimize_multistart_parallel(
1329
+ self,
1330
+ n_starts: int = 10,
1331
+ strategies: list[str] = None,
1332
+ max_iter_per_start: int = 2000,
1333
+ tolerance: float = 1e-10,
1334
+ return_all: bool = False,
1335
+ ) -> dict:
1336
+ """
1337
+ Parallel multi-start optimization for robust global minimum search.
1338
+
1339
+ Runs multiple independent optimizations in parallel with different
1340
+ initializations, then returns the best result.
1341
+
1342
+ Args:
1343
+ n_starts: Number of independent optimization runs
1344
+ strategies: List of initialization strategies to cycle through
1345
+ max_iter_per_start: Max iterations per individual optimization
1346
+ tolerance: Convergence tolerance for each run
1347
+ return_all: If True, return all results (not just best)
1348
+
1349
+ Returns:
1350
+ Best optimization result (or all results if return_all=True)
1351
+ """
1352
+ if strategies is None:
1353
+ strategies = ["smart", "gaussian_product", "random"]
1354
+
1355
+ n_workers = min(self.config.n_workers, n_starts)
1356
+
1357
+ print(f"\n{'=' * 80}")
1358
+ print("PARALLEL MULTI-START OPTIMIZATION")
1359
+ print(f"{'=' * 80}")
1360
+ print(f" Starts: {n_starts}")
1361
+ print(f" Workers: {n_workers}")
1362
+ print(f" Strategies: {strategies}")
1363
+ print(f" Max iter/start: {max_iter_per_start}")
1364
+
1365
+ start_time = time.time()
1366
+
1367
+ # Prepare configurations for each start
1368
+ start_configs = []
1369
+ for i in range(n_starts):
1370
+ strategy = strategies[i % len(strategies)]
1371
+ seed = 42 + i # Reproducible seeds
1372
+ start_configs.append(
1373
+ {
1374
+ "start_id": i,
1375
+ "strategy": strategy,
1376
+ "seed": seed,
1377
+ "max_iter": max_iter_per_start,
1378
+ "tolerance": tolerance,
1379
+ }
1380
+ )
1381
+
1382
+ def run_single_start(start_config: dict) -> dict:
1383
+ """Run a single optimization start (for parallel execution)"""
1384
+ start_id = start_config["start_id"]
1385
+ strategy = start_config["strategy"]
1386
+ seed = start_config["seed"]
1387
+
1388
+ # Set seed for reproducibility
1389
+ np.random.seed(seed)
1390
+
1391
+ # Create fresh optimizer for this process
1392
+ # (necessary for ProcessPoolExecutor, optional for ThreadPoolExecutor)
1393
+ config_copy = copy.copy(self.config)
1394
+ config_copy.verbose = False # Suppress output in parallel runs
1395
+ config_copy.parallel_gradients = False # Avoid nested parallelism
1396
+ config_copy.use_custatevec = (
1397
+ False  # cuStateVec does not work well with multiprocessing
1398
+ )
1399
+ config_copy.use_gpu = False  # Aer GPU also has issues with multiprocessing
1400
+
1401
+ optimizer = GaussianOptimizer(config_copy)
1402
+
1403
+ # Get initial parameters
1404
+ initial_params = optimizer.get_initial_params(strategy)
1405
+
1406
+ # Run optimization
1407
+ try:
1408
+ optimizer.optimize_stage(
1409
+ initial_params,
1410
+ f"Start {start_id}",
1411
+ start_config["max_iter"],
1412
+ start_config["tolerance"],
1413
+ )
1414
+
1415
+ return {
1416
+ "start_id": start_id,
1417
+ "strategy": strategy,
1418
+ "seed": seed,
1419
+ "fidelity": optimizer.best_fidelity,
1420
+ "params": optimizer.best_params,
1421
+ "success": True,
1422
+ "n_evals": optimizer.n_evals,
1423
+ }
1424
+ except Exception as e:
1425
+ return {
1426
+ "start_id": start_id,
1427
+ "strategy": strategy,
1428
+ "seed": seed,
1429
+ "fidelity": 0.0,
1430
+ "params": None,
1431
+ "success": False,
1432
+ "error": str(e),
1433
+ "n_evals": 0,
1434
+ }
1435
+
1436
+ # Run in parallel
1437
+ # Note: ProcessPoolExecutor is safer but has overhead
1438
+ # ThreadPoolExecutor is faster but requires thread-safe code
1439
+ all_results = []
1440
+
1441
+ if self.config.parallel_backend == "process":
1442
+ # Process-based parallelism (safer, more overhead)
1443
+ with ProcessPoolExecutor(max_workers=n_workers) as executor:
1444
+ all_results = list(executor.map(run_single_start, start_configs))
1445
+ else:
1446
+ # Thread-based parallelism (faster, requires thread-safety)
1447
+ with ThreadPoolExecutor(max_workers=n_workers) as executor:
1448
+ all_results = list(executor.map(run_single_start, start_configs))
1449
+
1450
+ elapsed = time.time() - start_time
1451
+
1452
+ # Find best result
1453
+ successful_results = [r for r in all_results if r["success"]]
1454
+
1455
+ if not successful_results:
1456
+ print("WARNING: All optimization starts failed!")
1457
+ return {"fidelity": 0, "params": None, "success": False}
1458
+
1459
+ best_result = max(successful_results, key=lambda x: x["fidelity"])
1460
+
1461
+ # Update instance state with best result
1462
+ if best_result["fidelity"] > self.best_fidelity:
1463
+ self.best_fidelity = best_result["fidelity"]
1464
+ self.best_params = best_result["params"]
1465
+
1466
+ # Summary
1467
+ fidelities = [r["fidelity"] for r in successful_results]
1468
+ total_evals = sum(r["n_evals"] for r in all_results)
1469
+
1470
+ print(f"\n{'=' * 60}")
1471
+ print("Multi-start Results Summary")
1472
+ print(f"{'=' * 60}")
1473
+ print(f" Successful starts: {len(successful_results)}/{n_starts}")
1474
+ print(f" Best fidelity: {best_result['fidelity']:.15f}")
1475
+ print(f" Best infidelity: {1 - best_result['fidelity']:.3e}")
1476
+ print(f" Best strategy: {best_result['strategy']} (start {best_result['start_id']})")
1477
+ print(f" Fidelity range: [{min(fidelities):.10f}, {max(fidelities):.10f}]")
1478
+ print(f" Total time: {elapsed:.1f}s")
1479
+ print(f" Total evaluations: {total_evals}")
1480
+ print(f" Avg time/start: {elapsed / n_starts:.2f}s")
1481
+
1482
+ # Print all results sorted by fidelity
1483
+ print("\n All results (sorted by fidelity):")
1484
+ for r in sorted(successful_results, key=lambda x: -x["fidelity"])[:10]:
1485
+ print(f" Start {r['start_id']:2d} ({r['strategy']:18s}): F = {r['fidelity']:.12f}")
1486
+
1487
+ if return_all:
1488
+ return {
1489
+ "best": best_result,
1490
+ "all_results": all_results,
1491
+ "time": elapsed,
1492
+ "total_evals": total_evals,
1493
+ }
1494
+
1495
+ return best_result
1496
+
1497
+ def evaluate_population_parallel(
1498
+ self, population: np.ndarray, _chunk_size: int = None
1499
+ ) -> np.ndarray:
1500
+ """
1501
+ DEPRECATED: Use evaluate_population() instead.
1502
+ Evaluate fidelity for a population of parameter sets.
1503
+ """
1504
+ # Just delegate to the unified method
1505
+ return self.evaluate_population(population)
1506
+
1507
+ def optimize_cmaes_parallel(
1508
+ self,
1509
+ initial_params: np.ndarray = None,
1510
+ sigma0: float = 0.5,
1511
+ population_size: int = None,
1512
+ max_generations: int = 200,
1513
+ target_fidelity: float = 0.9999,
1514
+ ftol: float = 1e-12,
1515
+ ) -> dict:
1516
+ """
1517
+ CMA-ES optimization with parallel population evaluation.
1518
+
1519
+ CMA-ES (Covariance Matrix Adaptation Evolution Strategy) is highly
1520
+ effective for:
1521
+ - Non-convex optimization landscapes
1522
+ - Escaping local minima
1523
+ - High-dimensional parameter spaces
1524
+
1525
+ Combined with parallel evaluation, this provides robust global
1526
+ optimization with good speedup.
1527
+
1528
+ Args:
1529
+ initial_params: Starting point (None = smart init)
1530
+ sigma0: Initial step size
1531
+ population_size: Population size (None = auto)
1532
+ max_generations: Maximum generations
1533
+ target_fidelity: Stop early if achieved
1534
+ ftol: Function tolerance for convergence
1535
+
1536
+ Returns:
1537
+ Optimization results dictionary
1538
+ """
1539
+ try:
1540
+ import cma
1541
+ except ImportError:
1542
+ print("CMA-ES requires the 'cma' package. Install with: pip install cma")
1543
+ print("Falling back to standard optimization...")
1544
+ return self.optimize(initial_params)
1545
+
1546
+ if initial_params is None:
1547
+ initial_params = self.get_initial_params("smart")
1548
+
1549
+ if population_size is None:
1550
+ # CMA-ES default: 4 + floor(3 * ln(n))
1551
+ population_size = 4 + int(3 * np.log(self.n_params))
1552
+ # Round up to multiple of n_workers for efficiency
1553
+ population_size = (
1554
+ (population_size + self.config.n_workers - 1)
1555
+ // self.config.n_workers
1556
+ * self.config.n_workers
1557
+ )
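+ # Example (illustrative): with n_params = 20 and n_workers = 8 this gives
+ # 4 + floor(3·ln 20) = 12, rounded up to 16 so each generation fills all workers.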
1558
+
1559
+ print(f"\n{'=' * 80}")
1560
+ print("CMA-ES OPTIMIZATION (Parallel)")
1561
+ print(f"{'=' * 80}")
1562
+ print(f" Population size: {population_size}")
1563
+ print(f" Workers: {self.config.n_workers}")
1564
+ print(f" Max generations: {max_generations}")
1565
+ print(f" Initial sigma: {sigma0}")
1566
+ print(f" Target fidelity: {target_fidelity}")
1567
+
1568
+ start_time = time.time()
1569
+
1570
+ # CMA-ES options
1571
+ opts = {
1572
+ "popsize": population_size,
1573
+ "maxiter": max_generations,
1574
+ "ftarget": -target_fidelity, # We minimize -fidelity
1575
+ "tolfun": ftol,
1576
+ "verb_disp": 1 if self.config.verbose else 0,
1577
+ "verb_log": 0,
1578
+ "bounds": [-2 * np.pi, 2 * np.pi],
1579
+ }
1580
+
1581
+ # Initialize CMA-ES
1582
+ es = cma.CMAEvolutionStrategy(initial_params, sigma0, opts)
1583
+
1584
+ generation = 0
1585
+ history = {"fidelity": [], "generation": []}
1586
+
1587
+ while not es.stop():
1588
+ generation += 1
1589
+
1590
+ # Get population
1591
+ population = np.array(es.ask())
1592
+
1593
+ # Parallel evaluation
1594
+ fidelities = self.evaluate_population(population)
1595
+
1596
+ # CMA-ES minimizes, so negate fidelities
1597
+ es.tell(population, -fidelities)
1598
+
1599
+ # Track progress
1600
+ best_gen_fid = np.max(fidelities)
1601
+ history["fidelity"].append(best_gen_fid)
1602
+ history["generation"].append(generation)
1603
+
1604
+ # Progress output
1605
+ if self.config.verbose and generation % 10 == 0:
1606
+ print(
1607
+ f" Gen {generation:4d}: best F = {self.best_fidelity:.12f}, "
1608
+ f"gen best = {best_gen_fid:.10f}, sigma = {es.sigma:.4f}"
1609
+ )
1610
+
1611
+ # Early stopping if target achieved
1612
+ if self.best_fidelity >= target_fidelity:
1613
+ print(f"\n Target fidelity {target_fidelity} achieved at generation {generation}")
1614
+ break
1615
+
1616
+ elapsed = time.time() - start_time
1617
+
1618
+ # Get final result
1619
+ final_params = es.result.xbest
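+ # xbest is the best solution CMA-ES evaluated over all generations; it is
+ # re-scored below in case it beats the running best_fidelity / best_params.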
1620
+ final_psi = self.get_statevector(final_params)
1621
+ final_fidelity = self._compute_fidelity_fast(final_psi)
1622
+
1623
+ # Ensure we have the true best
1624
+ if final_fidelity > self.best_fidelity:
1625
+ self.best_fidelity = final_fidelity
1626
+ self.best_params = final_params
1627
+
1628
+ print(f"\n{'=' * 60}")
1629
+ print("CMA-ES Complete")
1630
+ print(f"{'=' * 60}")
1631
+ print(f" Final fidelity: {self.best_fidelity:.15f}")
1632
+ print(f" Infidelity: {1 - self.best_fidelity:.3e}")
1633
+ print(f" Generations: {generation}")
1634
+ print(f" Total time: {elapsed:.1f}s")
1635
+ print(f" Time/generation: {elapsed / generation:.2f}s")
1636
+
1637
+ return {
1638
+ "params": self.best_params,
1639
+ "fidelity": self.best_fidelity,
1640
+ "infidelity": 1 - self.best_fidelity,
1641
+ "generations": generation,
1642
+ "history": history,
1643
+ "time": elapsed,
1644
+ "cma_result": es.result,
1645
+ }
1646
+
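
A minimal usage sketch for the CMA-ES routine above. The import paths follow this package's layout; constructing OptimizerConfig with only n_qubits (all other fields left at their defaults) is an assumption made for illustration:

    from wings.config import OptimizerConfig
    from wings.optimizer import GaussianOptimizer

    config = OptimizerConfig(n_qubits=6)   # assumed: remaining fields take their defaults
    opt = GaussianOptimizer(config)
    result = opt.optimize_cmaes_parallel(sigma0=0.3, max_generations=100, target_fidelity=0.9999)
    print(result["fidelity"], result["generations"], f"{result['time']:.1f}s")
    best_params = result["params"]         # array of optimal circuit parameters
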
1647
+ def plot_results(self, results: dict, save_path: Optional[str] = None):
1648
+ """Create visualization plots with high precision display"""
1649
+ try:
1650
+ fig, axes = plt.subplots(2, 2, figsize=(14, 11))
1651
+
1652
+ x = self.positions
1653
+ psi_circuit = results["final_statevector"]
1654
+ psi_target = self.target
1655
+
1656
+ # Plot 1: Probability densities (log scale option for high precision)
1657
+ ax = axes[0, 0]
1658
+ ax.plot(x, np.abs(psi_circuit) ** 2, "b-", label="Circuit", linewidth=2)
1659
+ ax.plot(
1660
+ x, np.abs(psi_target) ** 2, "r--", label="Target Gaussian", linewidth=2, alpha=0.8
1661
+ )
1662
+ ax.set_xlabel("Position x", fontsize=11)
1663
+ ax.set_ylabel("|ψ(x)|²", fontsize=11)
1664
+ ax.set_title(
1665
+ f"Probability Density (Fidelity = {results['fidelity']:.10f})", fontsize=12
1666
+ )
1667
+ ax.legend(fontsize=10)
1668
+ ax.grid(True, alpha=0.3)
1669
+
1670
+ # Plot 2: Real and imaginary parts
1671
+ ax = axes[0, 1]
1672
+ ax.plot(x, np.real(psi_circuit), "b-", label="Circuit (Real)", linewidth=1.5)
1673
+ ax.plot(
1674
+ x, np.imag(psi_circuit), "b--", label="Circuit (Imag)", linewidth=1.5, alpha=0.7
1675
+ )
1676
+ ax.plot(x, np.real(psi_target), "r-", label="Target (Real)", linewidth=1.5, alpha=0.8)
1677
+ ax.plot(x, np.imag(psi_target), "r--", label="Target (Imag)", linewidth=1.5, alpha=0.5)
1678
+ ax.set_xlabel("Position x", fontsize=11)
1679
+ ax.set_ylabel("Amplitude", fontsize=11)
1680
+ ax.set_title("Wavefunction Components", fontsize=12)
1681
+ ax.legend(fontsize=9)
1682
+ ax.grid(True, alpha=0.3)
1683
+
1684
+ # Plot 3: Difference (log scale for high precision)
1685
+ ax = axes[1, 0]
1686
+ difference = np.abs(psi_circuit - psi_target) ** 2
1687
+ max_diff = np.max(difference)
1688
+ ax.plot(x, difference, "g-", linewidth=2)
1689
+ ax.set_xlabel("Position x", fontsize=11)
1690
+ ax.set_ylabel("|ψ_circuit - ψ_target|²", fontsize=11)
1691
+ ax.set_title(f"Squared Difference (max = {max_diff:.3e})", fontsize=12)
1692
+ ax.set_yscale("log")
1693
+ ax.grid(True, alpha=0.3, which="both")
1694
+
1695
+ # Plot 4: Convergence with infidelity tracking
1696
+ ax = axes[1, 1]
1697
+ if len(self.history["fidelity"]) > 0:
1698
+ fidelities = np.array(self.history["fidelity"])
1699
+ infidelities = 1 - fidelities
1700
+
1701
+ # Plot on log scale to see high precision improvement
1702
+ ax.semilogy(
1703
+ self.history["iteration"], infidelities, "g-", linewidth=1.5, label="Infidelity"
1704
+ )
1705
+ ax.axhline(y=1e-3, color="r", linestyle="--", alpha=0.5, label="F=0.999")
1706
+ ax.axhline(y=1e-4, color="orange", linestyle="--", alpha=0.5, label="F=0.9999")
1707
+ ax.axhline(
1708
+ y=results["infidelity"],
1709
+ color="blue",
1710
+ linestyle="-",
1711
+ alpha=0.7,
1712
+ label=f"Final: 1-F={results['infidelity']:.2e}",
1713
+ )
1714
+ ax.set_xlabel("Function Evaluation", fontsize=11)
1715
+ ax.set_ylabel("Infidelity (1 - F)", fontsize=11)
1716
+ ax.set_title("Optimization Progress (Log Scale)", fontsize=12)
1717
+ ax.legend(fontsize=9)
1718
+ ax.grid(True, alpha=0.3, which="both")
1719
+
1720
+ # Add high-precision statistics text
1721
+ stats_text = (
1722
+ f"High Precision Results:\n"
1723
+ f"Fidelity: {results['fidelity']:.12f}\n"
1724
+ f"Infidelity: {results['infidelity']:.3e}\n"
1725
+ f"Circuit: μ={results['circuit_mean']:.8f}, σ={results['circuit_std']:.8f}\n"
1726
+ f"Target: μ={results['target_mean']:.8f}, σ={results['target_std']:.8f}\n"
1727
+ f"Errors: Δμ={results['mean_error']:.3e}, Δσ={results['std_error']:.3e}\n"
1728
+ f"Rel. σ error: {results['relative_std_error'] * 100:.2f}%\n"
1729
+ f"Time: {results['time']:.1f}s, Evals: {results['n_evaluations']}"
1730
+ )
1731
+ fig.text(
1732
+ 0.02,
1733
+ 0.02,
1734
+ stats_text,
1735
+ fontsize=9,
1736
+ family="monospace",
1737
+ bbox={"boxstyle": "round", "facecolor": "wheat", "alpha": 0.6},
1738
+ )
1739
+
1740
+ plt.suptitle(
1741
+ f"High Precision Gaussian State (n={self.config.n_qubits}, σ={self.config.sigma:.4f}, box=±{self.config.box_size:.2f})",
1742
+ fontsize=14,
1743
+ fontweight="bold",
1744
+ )
1745
+ plt.tight_layout()
1746
+
1747
+ if save_path:
1748
+ try:
1749
+ plt.savefig(save_path, dpi=200, bbox_inches="tight")
1750
+ print(f"Plot saved to: {save_path}")
1751
+ except Exception as e:
1752
+ print(f"Warning: Could not save plot: {e}")
1753
+
1754
+ plt.show()
1755
+
1756
+ return fig
1757
+
1758
+ except Exception as e:
1759
+ print(f"Warning: Could not create plot: {e}")
1760
+ import traceback
1761
+
1762
+ traceback.print_exc()
1763
+ return None
1764
+
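
Continuing the sketch above (same opt), plot_results expects the results dict whose keys it reads here (final_statevector, fidelity, infidelity, the mean/std statistics, time, n_evaluations). It is assumed that opt.optimize(None) returns such a dict, as in the ImportError fallback of optimize_cmaes_parallel:

    results = opt.optimize(None)   # assumed default initialization, as in the fallback above
    fig = opt.plot_results(results, save_path="gaussian_fit.png")
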
1765
+ def save_results(self, results: dict, filepath: Optional[str] = None) -> str:
1766
+ """Save high-precision parameters to text file"""
1767
+ if filepath is None:
1768
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
1769
+ filepath = f"gaussian_highprec_q{self.config.n_qubits}_s{self.config.sigma:.4f}_{timestamp}.txt"
1770
+
1771
+ try:
1772
+ with open(filepath, "w", encoding="utf-8") as f:
1773
+ f.write("=" * 80 + "\n")
1774
+ f.write("HIGH PRECISION GAUSSIAN STATE PREPARATION\n")
1775
+ f.write("=" * 80 + "\n\n")
1776
+
1777
+ f.write("CONFIGURATION\n")
1778
+ f.write("-" * 40 + "\n")
1779
+ f.write(f"Timestamp: {datetime.now().isoformat()}\n")
1780
+ f.write(f"Number of qubits: {self.config.n_qubits}\n")
1781
+ f.write(f"Number of params: {self.config.n_params}\n")
1782
+ f.write(f"Target sigma: {self.config.sigma:.10f}\n")
1783
+ f.write(f"Target x0: {self.config.x0:.10f}\n")
1784
+ f.write(f"Box size: +/-{self.config.box_size:.6f}\n")
1785
+ f.write(f"Grid points: {self.config.n_states}\n")
1786
+ f.write(f"Grid spacing: {self.config.delta_x:.10f}\n")
1787
+ f.write(f"Optimizer: {self.config.method}\n")
1788
+ f.write(f"Max iterations: {self.config.max_iter}\n")
1789
+ f.write(f"Max fun evals: {self.config.max_fun}\n")
1790
+ f.write(f"Tolerance: {self.config.tolerance:.2e}\n")
1791
+ f.write(f"High precision: {self.config.high_precision}\n")
1792
+ f.write(f"Refinement enabled: {self.config.enable_refinement}\n\n")
1793
+
1794
+ f.write("HIGH PRECISION RESULTS\n")
1795
+ f.write("-" * 40 + "\n")
1796
+ f.write(f"Fidelity: {results['fidelity']:.15f}\n")
1797
+ f.write(f"Infidelity (1-F): {results['infidelity']:.3e}\n")
1798
+ f.write(f"Circuit mean: {results['circuit_mean']:.12f}\n")
1799
+ f.write(f"Circuit std: {results['circuit_std']:.12f}\n")
1800
+ f.write(f"Target mean: {results['target_mean']:.12f}\n")
1801
+ f.write(f"Target std: {results['target_std']:.12f}\n")
1802
+ f.write(f"Error in mean: {results['mean_error']:.3e}\n")
1803
+ f.write(f"Error in std: {results['std_error']:.3e}\n")
1804
+ f.write(f"Relative std err: {results['relative_std_error'] * 100:.4f}%\n")
1805
+ f.write(f"Optimization time: {results['time']:.2f} seconds\n")
1806
+ f.write(f"Function evals: {results['n_evaluations']}\n")
1807
+ f.write(f"Success: {results['success']}\n")
1808
+ f.write(f"Message: {results.get('optimizer_message', 'N/A')}\n\n")
1809
+
1810
+ f.write("OPTIMAL PARAMETERS (15 decimal places)\n")
1811
+ f.write("-" * 40 + "\n")
1812
+ f.write("# Index Value\n")
1813
+ params = results["optimal_params"]
1814
+ for i, param in enumerate(params):
1815
+ f.write(f"{i:5d} {param:+.15f}\n")
1816
+
1817
+ f.write("\n" + "=" * 80 + "\n")
1818
+ f.write("# To load parameters:\n")
1819
+ f.write(
1820
+ f"# params = np.loadtxt('{os.path.basename(filepath)}', skiprows=N, usecols=1)\n"
1821
+ )
1822
+
1823
+ print(f"\nResults saved to: {filepath}")
1824
+
1825
+ # Save numpy array with full precision
1826
+ np_file = filepath.replace(".txt", "_params.npy")
1827
+ np.save(np_file, results["optimal_params"])
1828
+ print(f"Parameters saved to: {np_file}")
1829
+
1830
+ # Save JSON with results
1831
+ json_file = filepath.replace(".txt", "_results.json")
1832
+ json_data = {
1833
+ "fidelity": float(results["fidelity"]),
1834
+ "infidelity": float(results["infidelity"]),
1835
+ "circuit_mean": float(results["circuit_mean"]),
1836
+ "circuit_std": float(results["circuit_std"]),
1837
+ "target_mean": float(results["target_mean"]),
1838
+ "target_std": float(results["target_std"]),
1839
+ "mean_error": float(results["mean_error"]),
1840
+ "std_error": float(results["std_error"]),
1841
+ "time": float(results["time"]),
1842
+ "n_evaluations": int(results["n_evaluations"]),
1843
+ "config": {
1844
+ "n_qubits": self.config.n_qubits,
1845
+ "sigma": self.config.sigma,
1846
+ "x0": self.config.x0,
1847
+ "box_size": self.config.box_size,
1848
+ "method": self.config.method,
1849
+ "high_precision": self.config.high_precision,
1850
+ },
1851
+ }
1852
+ with open(json_file, "w") as f:
1853
+ json.dump(json_data, f, indent=2)
1854
+ print(f"JSON results saved to: {json_file}")
1855
+
1856
+ except Exception as e:
1857
+ print(f"Error saving results: {e}")
1858
+ import traceback
1859
+
1860
+ traceback.print_exc()
1861
+
1862
+ return filepath
1863
+
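
save_results writes three artifacts: the human-readable .txt report, a _params.npy array, and a _results.json summary. A sketch of reloading the saved parameters later, with an illustrative file name that follows the naming pattern above:

    import numpy as np

    params = np.load("gaussian_highprec_q6_s0.5000_20250101_000000_params.npy")
    circuit = opt.get_optimized_circuit(params)   # rebind the circuit from the saved angles
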
1864
+ def get_optimized_circuit(
1865
+ self,
1866
+ params: Optional[np.ndarray] = None,
1867
+ include_measurements: bool = False,
1868
+ ) -> QuantumCircuit:
1869
+ """Build a parameter-bound QuantumCircuit via wings.export.build_optimized_circuit."""
+ from .export import build_optimized_circuit
1870
+
1871
+ return build_optimized_circuit(self, params, include_measurements)
1872
+
1873
+ def export_qasm(
1874
+ self,
1875
+ params: Optional[np.ndarray] = None,
1876
+ include_measurements: bool = False,
1877
+ version: int = 2,
1878
+ ) -> str:
1879
+ """Export the optimized circuit as an OpenQASM 2 or 3 string via wings.export."""
+ from .export import export_to_qasm, export_to_qasm3
1880
+
1881
+ if version == 2:
1882
+ return export_to_qasm(self, params, include_measurements)
1883
+ elif version == 3:
1884
+ return export_to_qasm3(self, params, include_measurements)
1885
+ else:
1886
+ raise ValueError(f"OpenQASM version must be 2 or 3, got {version}")
1887
+
1888
+ def save_circuit(
1889
+ self,
1890
+ filepath: str,
1891
+ params: Optional[np.ndarray] = None,
1892
+ format: str = "qasm",
1893
+ include_measurements: bool = False,
1894
+ **kwargs,
1895
+ ) -> str:
1896
+ """Save the optimized circuit to filepath via wings.export.save_circuit."""
+ from .export import save_circuit
1897
+
1898
+ return str(save_circuit(self, filepath, params, format, include_measurements, **kwargs))
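
Finally, a sketch of exporting the optimized circuit with the helpers above (the output file name is illustrative; "qasm" is the default format string):

    qasm2 = opt.export_qasm(version=2)   # OpenQASM 2 string
    qasm3 = opt.export_qasm(version=3)   # OpenQASM 3 string
    path = opt.save_circuit("gaussian_state.qasm", format="qasm")
    print(path)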