superquantx-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. superquantx/__init__.py +321 -0
  2. superquantx/algorithms/__init__.py +55 -0
  3. superquantx/algorithms/base_algorithm.py +413 -0
  4. superquantx/algorithms/hybrid_classifier.py +628 -0
  5. superquantx/algorithms/qaoa.py +406 -0
  6. superquantx/algorithms/quantum_agents.py +1006 -0
  7. superquantx/algorithms/quantum_kmeans.py +575 -0
  8. superquantx/algorithms/quantum_nn.py +544 -0
  9. superquantx/algorithms/quantum_pca.py +499 -0
  10. superquantx/algorithms/quantum_svm.py +346 -0
  11. superquantx/algorithms/vqe.py +553 -0
  12. superquantx/algorithms.py +863 -0
  13. superquantx/backends/__init__.py +265 -0
  14. superquantx/backends/base_backend.py +321 -0
  15. superquantx/backends/braket_backend.py +420 -0
  16. superquantx/backends/cirq_backend.py +466 -0
  17. superquantx/backends/ocean_backend.py +491 -0
  18. superquantx/backends/pennylane_backend.py +419 -0
  19. superquantx/backends/qiskit_backend.py +451 -0
  20. superquantx/backends/simulator_backend.py +455 -0
  21. superquantx/backends/tket_backend.py +519 -0
  22. superquantx/circuits.py +447 -0
  23. superquantx/cli/__init__.py +28 -0
  24. superquantx/cli/commands.py +528 -0
  25. superquantx/cli/main.py +254 -0
  26. superquantx/client.py +298 -0
  27. superquantx/config.py +326 -0
  28. superquantx/exceptions.py +287 -0
  29. superquantx/gates.py +588 -0
  30. superquantx/logging_config.py +347 -0
  31. superquantx/measurements.py +702 -0
  32. superquantx/ml.py +936 -0
  33. superquantx/noise.py +760 -0
  34. superquantx/utils/__init__.py +83 -0
  35. superquantx/utils/benchmarking.py +523 -0
  36. superquantx/utils/classical_utils.py +575 -0
  37. superquantx/utils/feature_mapping.py +467 -0
  38. superquantx/utils/optimization.py +410 -0
  39. superquantx/utils/quantum_utils.py +456 -0
  40. superquantx/utils/visualization.py +654 -0
  41. superquantx/version.py +33 -0
  42. superquantx-0.1.0.dist-info/METADATA +365 -0
  43. superquantx-0.1.0.dist-info/RECORD +46 -0
  44. superquantx-0.1.0.dist-info/WHEEL +4 -0
  45. superquantx-0.1.0.dist-info/entry_points.txt +2 -0
  46. superquantx-0.1.0.dist-info/licenses/LICENSE +21 -0
superquantx/utils/optimization.py
@@ -0,0 +1,410 @@
+ """Optimization utilities for quantum machine learning.
+
+ This module provides optimization functions for quantum circuits and parameters,
+ including classical optimizers commonly used in quantum machine learning.
+ """
+
+ import time
+ from abc import ABC, abstractmethod
+ from typing import Any, Callable, Dict, List, Optional, Tuple
+
+ import numpy as np
+
+
+ class Optimizer(ABC):
+     """Base class for optimizers."""
+
+     def __init__(self, learning_rate: float = 0.01):
+         self.learning_rate = learning_rate
+         self.history = []
+
+     @abstractmethod
+     def step(self, params: np.ndarray, gradients: np.ndarray) -> np.ndarray:
+         """Perform one optimization step."""
+         pass
+
+     def reset(self):
+         """Reset optimizer state."""
+         self.history = []
+
+
+ class GradientDescentOptimizer(Optimizer):
+     """Simple gradient descent optimizer."""
+
+     def __init__(self, learning_rate: float = 0.01):
+         super().__init__(learning_rate)
+
+     def step(self, params: np.ndarray, gradients: np.ndarray) -> np.ndarray:
+         """Perform gradient descent step."""
+         new_params = params - self.learning_rate * gradients
+         return new_params
+
+
+ class AdamOptimizer(Optimizer):
+     """Adam optimizer for quantum parameter optimization."""
+
+     def __init__(
+         self,
+         learning_rate: float = 0.01,
+         beta1: float = 0.9,
+         beta2: float = 0.999,
+         epsilon: float = 1e-8
+     ):
+         super().__init__(learning_rate)
+         self.beta1 = beta1
+         self.beta2 = beta2
+         self.epsilon = epsilon
+         self.m = None
+         self.v = None
+         self.t = 0
+
+     def step(self, params: np.ndarray, gradients: np.ndarray) -> np.ndarray:
+         """Perform Adam optimization step."""
+         if self.m is None:
+             self.m = np.zeros_like(params)
+             self.v = np.zeros_like(params)
+
+         self.t += 1
+
+         # Update biased first moment estimate
+         self.m = self.beta1 * self.m + (1 - self.beta1) * gradients
+
+         # Update biased second raw moment estimate
+         self.v = self.beta2 * self.v + (1 - self.beta2) * (gradients ** 2)
+
+         # Compute bias-corrected first moment estimate
+         m_hat = self.m / (1 - self.beta1 ** self.t)
+
+         # Compute bias-corrected second raw moment estimate
+         v_hat = self.v / (1 - self.beta2 ** self.t)
+
+         # Update parameters
+         new_params = params - self.learning_rate * m_hat / (np.sqrt(v_hat) + self.epsilon)
+
+         return new_params
+
+     def reset(self):
+         """Reset Adam optimizer state."""
+         super().reset()
+         self.m = None
+         self.v = None
+         self.t = 0
+
+
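A minimal usage sketch for the optimizer classes above (not part of the packaged file), assuming the classes are importable from superquantx.utils.optimization as laid out in this wheel:

import numpy as np

from superquantx.utils.optimization import AdamOptimizer

# Minimize the quadratic ||params - target||^2 by hand-rolling the update loop.
target = np.array([0.5, -1.0])
params = np.zeros(2)
opt = AdamOptimizer(learning_rate=0.1)

for _ in range(200):
    gradients = 2 * (params - target)  # analytic gradient of the quadratic
    params = opt.step(params, gradients)

print(params)  # approaches [0.5, -1.0]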
+ def optimize_circuit(
+     cost_function: Callable[[np.ndarray], float],
+     initial_params: np.ndarray,
+     gradient_function: Optional[Callable[[np.ndarray], np.ndarray]] = None,
+     optimizer: str = 'adam',
+     max_iterations: int = 100,
+     tolerance: float = 1e-6,
+     learning_rate: float = 0.01,
+     verbose: bool = False
+ ) -> Dict[str, Any]:
+     """Optimize quantum circuit parameters.
+
+     Args:
+         cost_function: Function to minimize f(params) -> cost
+         initial_params: Initial parameter values
+         gradient_function: Function to compute gradients (optional)
+         optimizer: Optimizer type ('adam', 'sgd')
+         max_iterations: Maximum number of iterations
+         tolerance: Convergence tolerance
+         learning_rate: Learning rate for gradient-based optimizers
+         verbose: Whether to print progress
+
+     Returns:
+         Dictionary with optimization results
+
+     """
+     start_time = time.time()
+
+     # Initialize optimizer
+     if optimizer == 'adam':
+         opt = AdamOptimizer(learning_rate)
+     elif optimizer == 'sgd':
+         opt = GradientDescentOptimizer(learning_rate)
+     else:
+         raise ValueError(f"Unknown optimizer: {optimizer}")
+
+     params = initial_params.copy()
+     costs = []
+
+     # If no gradient function provided, use finite differences
+     if gradient_function is None:
+         gradient_function = lambda p: finite_difference_gradient(cost_function, p)
+
+     for iteration in range(max_iterations):
+         # Compute cost and gradient
+         cost = cost_function(params)
+         gradients = gradient_function(params)
+
+         costs.append(cost)
+
+         if verbose and iteration % 10 == 0:
+             print(f"Iteration {iteration}: Cost = {cost:.6f}")
+
+         # Check convergence
+         if iteration > 0 and abs(costs[-2] - cost) < tolerance:
+             if verbose:
+                 print(f"Converged at iteration {iteration}")
+             break
+
+         # Update parameters
+         params = opt.step(params, gradients)
+
+     optimization_time = time.time() - start_time
+
+     return {
+         'optimal_params': params,
+         'optimal_cost': costs[-1],
+         'cost_history': costs,
+         'n_iterations': len(costs),
+         'converged': len(costs) < max_iterations,
+         'optimization_time': optimization_time,
+         'optimizer': optimizer
+     }
+
+
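A usage sketch for optimize_circuit (not part of the packaged file); the stand-in cost function below takes the place of an actual circuit evaluation:

import numpy as np

from superquantx.utils.optimization import optimize_circuit

# Stand-in cost; in practice this would evaluate a parameterized quantum circuit.
def cost(params: np.ndarray) -> float:
    return float(np.sum(np.sin(params) ** 2))

result = optimize_circuit(
    cost_function=cost,
    initial_params=np.array([0.3, 1.2]),
    optimizer='adam',       # 'sgd' selects GradientDescentOptimizer instead
    max_iterations=200,
    learning_rate=0.05,
)
# No gradient_function was supplied, so gradients come from finite differences.
print(result['optimal_cost'], result['n_iterations'], result['converged'])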
+ def optimize_parameters(
+     objective_function: Callable,
+     bounds: List[Tuple[float, float]],
+     method: str = 'scipy',
+     max_evaluations: int = 1000,
+     random_state: Optional[int] = None
+ ) -> Dict[str, Any]:
+     """Optimize parameters using various methods.
+
+     Args:
+         objective_function: Function to minimize
+         bounds: Parameter bounds as list of (min, max) tuples
+         method: Optimization method ('scipy', 'random_search', 'grid_search')
+         max_evaluations: Maximum function evaluations
+         random_state: Random seed
+
+     Returns:
+         Optimization results dictionary
+
+     """
+     if method == 'scipy':
+         return _scipy_optimize(objective_function, bounds, max_evaluations)
+     elif method == 'random_search':
+         return _random_search_optimize(objective_function, bounds, max_evaluations, random_state)
+     elif method == 'grid_search':
+         return _grid_search_optimize(objective_function, bounds, max_evaluations)
+     else:
+         raise ValueError(f"Unknown optimization method: {method}")
+
+
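A usage sketch for the derivative-free random_search path of optimize_parameters (not part of the packaged file):

import numpy as np

from superquantx.utils.optimization import optimize_parameters

def objective(x) -> float:
    x = np.asarray(x)  # the search passes parameters as a plain list
    return float(np.sum((x - 0.25) ** 2))

result = optimize_parameters(
    objective_function=objective,
    bounds=[(-1.0, 1.0), (-1.0, 1.0)],
    method='random_search',
    max_evaluations=500,
    random_state=42,
)
print(result['optimal_params'], result['optimal_value'])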
+ def gradient_descent(
+     cost_function: Callable[[np.ndarray], float],
+     gradient_function: Callable[[np.ndarray], np.ndarray],
+     initial_params: np.ndarray,
+     learning_rate: float = 0.01,
+     max_iterations: int = 1000,
+     tolerance: float = 1e-6
+ ) -> Tuple[np.ndarray, List[float]]:
+     """Perform gradient descent optimization.
+
+     Args:
+         cost_function: Cost function to minimize
+         gradient_function: Function returning gradients
+         initial_params: Initial parameter values
+         learning_rate: Learning rate
+         max_iterations: Maximum iterations
+         tolerance: Convergence tolerance
+
+     Returns:
+         Tuple of (optimal_params, cost_history)
+
+     """
+     params = initial_params.copy()
+     cost_history = []
+
+     for i in range(max_iterations):
+         cost = cost_function(params)
+         cost_history.append(cost)
+
+         if i > 0 and abs(cost_history[-2] - cost) < tolerance:
+             break
+
+         gradients = gradient_function(params)
+         params = params - learning_rate * gradients
+
+     return params, cost_history
+
+
+ def adam_optimizer(
+     cost_function: Callable[[np.ndarray], float],
+     gradient_function: Callable[[np.ndarray], np.ndarray],
+     initial_params: np.ndarray,
+     learning_rate: float = 0.001,
+     beta1: float = 0.9,
+     beta2: float = 0.999,
+     epsilon: float = 1e-8,
+     max_iterations: int = 1000,
+     tolerance: float = 1e-6
+ ) -> Tuple[np.ndarray, List[float]]:
+     """Perform Adam optimization.
+
+     Args:
+         cost_function: Cost function to minimize
+         gradient_function: Function returning gradients
+         initial_params: Initial parameter values
+         learning_rate: Learning rate
+         beta1: Exponential decay rate for first moment
+         beta2: Exponential decay rate for second moment
+         epsilon: Small constant for numerical stability
+         max_iterations: Maximum iterations
+         tolerance: Convergence tolerance
+
+     Returns:
+         Tuple of (optimal_params, cost_history)
+
+     """
+     optimizer = AdamOptimizer(learning_rate, beta1, beta2, epsilon)
+     params = initial_params.copy()
+     cost_history = []
+
+     for i in range(max_iterations):
+         cost = cost_function(params)
+         cost_history.append(cost)
+
+         if i > 0 and abs(cost_history[-2] - cost) < tolerance:
+             break
+
+         gradients = gradient_function(params)
+         params = optimizer.step(params, gradients)
+
+     return params, cost_history
+
+
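The two convenience wrappers above share the same call pattern apart from the Adam hyperparameters; a short comparison sketch (not part of the packaged file):

import numpy as np

from superquantx.utils.optimization import adam_optimizer, gradient_descent

cost = lambda p: float(np.sum(p ** 2))
grad = lambda p: 2 * p
x0 = np.array([1.0, -2.0])

gd_params, gd_history = gradient_descent(cost, grad, x0, learning_rate=0.1)
adam_params, adam_history = adam_optimizer(cost, grad, x0, learning_rate=0.1)

# Both stop once successive costs differ by less than the tolerance.
print(len(gd_history), len(adam_history))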
+ def finite_difference_gradient(
+     function: Callable[[np.ndarray], float],
+     params: np.ndarray,
+     epsilon: float = 1e-6
+ ) -> np.ndarray:
+     """Compute gradient using finite differences.
+
+     Args:
+         function: Function to differentiate
+         params: Parameters at which to compute gradient
+         epsilon: Finite difference step size
+
+     Returns:
+         Gradient vector
+
+     """
+     gradients = np.zeros_like(params)
+
+     for i in range(len(params)):
+         # Central difference
+         params_plus = params.copy()
+         params_plus[i] += epsilon
+
+         params_minus = params.copy()
+         params_minus[i] -= epsilon
+
+         gradients[i] = (function(params_plus) - function(params_minus)) / (2 * epsilon)
+
+     return gradients
+
+
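A quick sanity check of finite_difference_gradient against an analytic gradient (not part of the packaged file):

import numpy as np

from superquantx.utils.optimization import finite_difference_gradient

f = lambda p: float(np.sum(np.sin(p)))
p0 = np.array([0.1, 0.7, 1.3])

numeric = finite_difference_gradient(f, p0, epsilon=1e-6)
analytic = np.cos(p0)
print(np.max(np.abs(numeric - analytic)))  # central differences: typically below 1e-8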
+ def _scipy_optimize(
+     objective_function: Callable,
+     bounds: List[Tuple[float, float]],
+     max_evaluations: int
+ ) -> Dict[str, Any]:
+     """Optimize using scipy methods."""
+     try:
+         from scipy.optimize import minimize
+
+         # Initial guess (center of bounds)
+         x0 = [(b[0] + b[1]) / 2 for b in bounds]
+
+         result = minimize(
+             objective_function,
+             x0,
+             bounds=bounds,
+             options={'maxiter': max_evaluations}
+         )
+
+         return {
+             'optimal_params': result.x,
+             'optimal_value': result.fun,
+             'n_evaluations': result.nfev,
+             'success': result.success,
+             'method': 'scipy'
+         }
+     except ImportError:
+         raise ImportError("scipy is required for scipy optimization")
+
+
+ def _random_search_optimize(
+     objective_function: Callable,
+     bounds: List[Tuple[float, float]],
+     max_evaluations: int,
+     random_state: Optional[int]
+ ) -> Dict[str, Any]:
+     """Random search optimization."""
+     np.random.seed(random_state)
+
+     best_params = None
+     best_value = float('inf')
+
+     for _ in range(max_evaluations):
+         # Generate random parameters within bounds
+         params = [np.random.uniform(b[0], b[1]) for b in bounds]
+         value = objective_function(params)
+
+         if value < best_value:
+             best_value = value
+             best_params = params
+
+     return {
+         'optimal_params': np.array(best_params),
+         'optimal_value': best_value,
+         'n_evaluations': max_evaluations,
+         'success': True,
+         'method': 'random_search'
+     }
+
+
+ def _grid_search_optimize(
+     objective_function: Callable,
+     bounds: List[Tuple[float, float]],
+     max_evaluations: int
+ ) -> Dict[str, Any]:
+     """Grid search optimization."""
+     n_params = len(bounds)
+     n_points_per_dim = int(max_evaluations ** (1 / n_params))
+
+     # Create grid points
+     grids = []
+     for b in bounds:
+         grids.append(np.linspace(b[0], b[1], n_points_per_dim))
+
+     best_params = None
+     best_value = float('inf')
+     n_evaluations = 0
+
+     # Evaluate all grid points
+     for params in np.ndindex(*[len(g) for g in grids]):
+         if n_evaluations >= max_evaluations:
+             break
+
+         param_values = [grids[i][params[i]] for i in range(n_params)]
+         value = objective_function(param_values)
+         n_evaluations += 1
+
+         if value < best_value:
+             best_value = value
+             best_params = param_values
+
+     return {
+         'optimal_params': np.array(best_params),
+         'optimal_value': best_value,
+         'n_evaluations': n_evaluations,
+         'success': True,
+         'method': 'grid_search'
+     }
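
Sizing note for the grid search above, reached through optimize_parameters(method='grid_search'): the grid uses int(max_evaluations ** (1 / n_params)) points per axis, so the actual evaluation count can land well below the budget. A sketch (not part of the packaged file):

from superquantx.utils.optimization import optimize_parameters

result = optimize_parameters(
    objective_function=lambda x: (x[0] - 0.1) ** 2 + (x[1] + 0.4) ** 2,
    bounds=[(-1.0, 1.0), (-1.0, 1.0)],
    method='grid_search',
    max_evaluations=1000,  # int(1000 ** 0.5) = 31 points per axis -> 961 evaluations
)
print(result['n_evaluations'])  # 961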