superquantx-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- superquantx/__init__.py +321 -0
- superquantx/algorithms/__init__.py +55 -0
- superquantx/algorithms/base_algorithm.py +413 -0
- superquantx/algorithms/hybrid_classifier.py +628 -0
- superquantx/algorithms/qaoa.py +406 -0
- superquantx/algorithms/quantum_agents.py +1006 -0
- superquantx/algorithms/quantum_kmeans.py +575 -0
- superquantx/algorithms/quantum_nn.py +544 -0
- superquantx/algorithms/quantum_pca.py +499 -0
- superquantx/algorithms/quantum_svm.py +346 -0
- superquantx/algorithms/vqe.py +553 -0
- superquantx/algorithms.py +863 -0
- superquantx/backends/__init__.py +265 -0
- superquantx/backends/base_backend.py +321 -0
- superquantx/backends/braket_backend.py +420 -0
- superquantx/backends/cirq_backend.py +466 -0
- superquantx/backends/ocean_backend.py +491 -0
- superquantx/backends/pennylane_backend.py +419 -0
- superquantx/backends/qiskit_backend.py +451 -0
- superquantx/backends/simulator_backend.py +455 -0
- superquantx/backends/tket_backend.py +519 -0
- superquantx/circuits.py +447 -0
- superquantx/cli/__init__.py +28 -0
- superquantx/cli/commands.py +528 -0
- superquantx/cli/main.py +254 -0
- superquantx/client.py +298 -0
- superquantx/config.py +326 -0
- superquantx/exceptions.py +287 -0
- superquantx/gates.py +588 -0
- superquantx/logging_config.py +347 -0
- superquantx/measurements.py +702 -0
- superquantx/ml.py +936 -0
- superquantx/noise.py +760 -0
- superquantx/utils/__init__.py +83 -0
- superquantx/utils/benchmarking.py +523 -0
- superquantx/utils/classical_utils.py +575 -0
- superquantx/utils/feature_mapping.py +467 -0
- superquantx/utils/optimization.py +410 -0
- superquantx/utils/quantum_utils.py +456 -0
- superquantx/utils/visualization.py +654 -0
- superquantx/version.py +33 -0
- superquantx-0.1.0.dist-info/METADATA +365 -0
- superquantx-0.1.0.dist-info/RECORD +46 -0
- superquantx-0.1.0.dist-info/WHEEL +4 -0
- superquantx-0.1.0.dist-info/entry_points.txt +2 -0
- superquantx-0.1.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,413 @@
"""Base classes for quantum machine learning algorithms.

This module provides abstract base classes that define the common interface
for all quantum algorithms in SuperQuantX.
"""

import logging
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, Optional, Union

import numpy as np


logger = logging.getLogger(__name__)


@dataclass
class QuantumResult:
    """Container for quantum algorithm results.

    This class provides a standardized way to return results from quantum
    algorithms, including the main result, metadata, and performance metrics.

    Attributes:
        result: The main algorithm result
        metadata: Additional information about the computation
        execution_time: Time taken to execute the algorithm (seconds)
        backend_info: Information about the backend used
        error: Error information if computation failed
        intermediate_results: Optional intermediate results for debugging

    """

    result: Any
    metadata: Dict[str, Any]
    execution_time: float
    backend_info: Dict[str, Any]
    error: Optional[str] = None
    intermediate_results: Optional[Dict[str, Any]] = None

    def __post_init__(self):
        """Validate and process result after initialization."""
        if self.metadata is None:
            self.metadata = {}
        if self.backend_info is None:
            self.backend_info = {}
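
The snippet below is an editor-added usage sketch, not part of the package source: since QuantumResult is a plain dataclass, a caller can construct and inspect one directly. The field values are invented, and the import path is inferred from the file layout listed above.

# Illustrative sketch only -- not shipped in superquantx; values are made up.
import numpy as np

from superquantx.algorithms.base_algorithm import QuantumResult  # path inferred from this diff

res = QuantumResult(
    result=np.array([0, 1, 1, 0]),                     # e.g. predicted labels
    metadata={"algorithm": "example", "shots": 1024},  # free-form run details
    execution_time=0.42,                               # seconds
    backend_info={"backend": "SimulatorBackend"},
)

if res.error is None:
    print("result:", res.result)
    print("took:", res.execution_time, "s")
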
class BaseQuantumAlgorithm(ABC):
    """Abstract base class for all quantum machine learning algorithms.

    This class defines the common interface that all quantum algorithms must
    implement, providing consistency across different algorithm types and backends.

    Args:
        backend: Quantum backend to use for computation
        shots: Number of measurement shots (default: 1024)
        seed: Random seed for reproducibility
        optimization_level: Circuit optimization level (0-3)
        **kwargs: Additional algorithm-specific parameters

    """

    def __init__(
        self,
        backend: Union[str, Any],
        shots: int = 1024,
        seed: Optional[int] = None,
        optimization_level: int = 1,
        **kwargs
    ) -> None:
        """Initialize the quantum algorithm."""
        self.backend = self._initialize_backend(backend)
        self.shots = shots
        self.seed = seed
        self.optimization_level = optimization_level

        # Algorithm state
        self.is_fitted = False
        self.training_history = []
        self.best_params = None
        self.best_score = None

        # Store additional parameters
        self.algorithm_params = kwargs

        # Performance tracking
        self.execution_times = []
        self.backend_stats = {}

        logger.info(f"Initialized {self.__class__.__name__} with backend {type(self.backend).__name__}")

    def _initialize_backend(self, backend: Union[str, Any]) -> Any:
        """Initialize the quantum backend."""
        if isinstance(backend, str):
            # Import here to avoid circular imports
            from ..backends import get_backend
            return get_backend(backend)
        return backend

    @abstractmethod
    def fit(self, X: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> 'BaseQuantumAlgorithm':
        """Train the quantum algorithm.

        Args:
            X: Training data features
            y: Training data labels (for supervised learning)
            **kwargs: Additional training parameters

        Returns:
            Self for method chaining

        """
        pass

    @abstractmethod
    def predict(self, X: np.ndarray, **kwargs) -> np.ndarray:
        """Make predictions using the trained algorithm.

        Args:
            X: Input data for prediction
            **kwargs: Additional prediction parameters

        Returns:
            Predictions array

        """
        pass

    def score(self, X: np.ndarray, y: np.ndarray, **kwargs) -> float:
        """Compute the algorithm's score on the given test data.

        Args:
            X: Test data features
            y: True test data labels
            **kwargs: Additional scoring parameters

        Returns:
            Algorithm score (higher is better)

        """
        predictions = self.predict(X, **kwargs)
        return self._compute_score(predictions, y)

    def _compute_score(self, predictions: np.ndarray, y_true: np.ndarray) -> float:
        """Compute accuracy score by default."""
        from sklearn.metrics import accuracy_score
        return accuracy_score(y_true, predictions)

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        """Get algorithm parameters.

        Args:
            deep: Whether to return deep copy of parameters

        Returns:
            Parameter dictionary

        """
        params = {
            'backend': self.backend,
            'shots': self.shots,
            'seed': self.seed,
            'optimization_level': self.optimization_level,
        }
        params.update(self.algorithm_params)
        return params

    def set_params(self, **params) -> 'BaseQuantumAlgorithm':
        """Set algorithm parameters.

        Args:
            **params: Parameters to set

        Returns:
            Self for method chaining

        """
        for key, value in params.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                self.algorithm_params[key] = value
        return self

    def save_model(self, filepath: str) -> None:
        """Save the trained model to disk.

        Args:
            filepath: Path where to save the model

        """
        import pickle

        if not self.is_fitted:
            logger.warning("Model is not fitted yet. Saving unfitted model.")

        model_data = {
            'class': self.__class__.__name__,
            'params': self.get_params(),
            'is_fitted': self.is_fitted,
            'training_history': self.training_history,
            'best_params': self.best_params,
            'best_score': self.best_score,
        }

        with open(filepath, 'wb') as f:
            pickle.dump(model_data, f)

        logger.info(f"Model saved to {filepath}")

    @classmethod
    def load_model(cls, filepath: str) -> 'BaseQuantumAlgorithm':
        """Load a trained model from disk.

        Args:
            filepath: Path to the saved model

        Returns:
            Loaded algorithm instance

        """
        import pickle

        with open(filepath, 'rb') as f:
            model_data = pickle.load(f)

        # Create new instance with saved parameters
        instance = cls(**model_data['params'])
        instance.is_fitted = model_data['is_fitted']
        instance.training_history = model_data['training_history']
        instance.best_params = model_data['best_params']
        instance.best_score = model_data['best_score']

        logger.info(f"Model loaded from {filepath}")
        return instance

    def benchmark(self, X: np.ndarray, y: Optional[np.ndarray] = None, runs: int = 5) -> Dict[str, Any]:
        """Benchmark algorithm performance.

        Args:
            X: Test data
            y: Test labels (optional)
            runs: Number of benchmark runs

        Returns:
            Benchmark results dictionary

        """
        execution_times = []
        scores = []

        for i in range(runs):
            start_time = time.time()

            if y is not None:
                score = self.score(X, y)
                scores.append(score)
            else:
                self.predict(X)

            execution_time = time.time() - start_time
            execution_times.append(execution_time)

        results = {
            'execution_times': execution_times,
            'mean_execution_time': np.mean(execution_times),
            'std_execution_time': np.std(execution_times),
            'min_execution_time': np.min(execution_times),
            'max_execution_time': np.max(execution_times),
        }

        if scores:
            results.update({
                'scores': scores,
                'mean_score': np.mean(scores),
                'std_score': np.std(scores),
                'min_score': np.min(scores),
                'max_score': np.max(scores),
            })

        return results

    def get_circuit_info(self) -> Dict[str, Any]:
        """Get information about the quantum circuit.

        Returns:
            Circuit information dictionary

        """
        return {
            'backend': type(self.backend).__name__,
            'shots': self.shots,
            'optimization_level': self.optimization_level,
        }

    def reset(self) -> None:
        """Reset algorithm to untrained state."""
        self.is_fitted = False
        self.training_history = []
        self.best_params = None
        self.best_score = None
        self.execution_times = []
        self.backend_stats = {}

        logger.info(f"Reset {self.__class__.__name__} to untrained state")

    def __repr__(self) -> str:
        """String representation of the algorithm."""
        params = self.get_params()
        param_str = ", ".join([f"{k}={v}" for k, v in list(params.items())[:3]])
        return f"{self.__class__.__name__}({param_str}, ...)"

    def __str__(self) -> str:
        """Human-readable string representation."""
        status = "fitted" if self.is_fitted else "unfitted"
        return f"{self.__class__.__name__} ({status})"
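
For orientation, here is an editor-added sketch (not code from this wheel) of the minimal surface a concrete algorithm has to provide: implement fit and predict, and the rest of the interface (score, benchmark, get_params/set_params, save_model/load_model) is inherited. The class, its threshold logic, and the commented backend name are invented; the import path for BaseQuantumAlgorithm is inferred from the file layout above.

# Hypothetical subclass for illustration -- not part of superquantx.
from typing import Optional

import numpy as np

class MeanThresholdClassifier(BaseQuantumAlgorithm):
    """Toy classifier: labels a sample by comparing its feature mean to a learned threshold."""

    def fit(self, X: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> "MeanThresholdClassifier":
        # A real quantum algorithm would build and optimize circuits on self.backend here.
        self.threshold_ = float(np.mean(X))
        self.is_fitted = True
        return self

    def predict(self, X: np.ndarray, **kwargs) -> np.ndarray:
        return (X.mean(axis=1) > self.threshold_).astype(int)

# Typical use of the inherited, scikit-learn-style interface:
# clf = MeanThresholdClassifier(backend="simulator", shots=512)   # backend string is a guess
# clf.fit(X_train, y_train)
# accuracy = clf.score(X_test, y_test)
# clf.save_model("clf.pkl")   # note: only get_params() and bookkeeping fields are pickled
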
class SupervisedQuantumAlgorithm(BaseQuantumAlgorithm):
    """Base class for supervised quantum learning algorithms."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.classes_ = None
        self.n_features_ = None
        self.n_classes_ = None

    def fit(self, X: np.ndarray, y: np.ndarray, **kwargs) -> 'SupervisedQuantumAlgorithm':
        """Fit supervised algorithm."""
        self._validate_data(X, y)
        self.classes_ = np.unique(y)
        self.n_features_ = X.shape[1]
        self.n_classes_ = len(self.classes_)
        return self

    def _validate_data(self, X: np.ndarray, y: np.ndarray) -> None:
        """Validate input data."""
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y must have same number of samples")
        if len(X.shape) != 2:
            raise ValueError("X must be 2D array")


class UnsupervisedQuantumAlgorithm(BaseQuantumAlgorithm):
    """Base class for unsupervised quantum learning algorithms."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.n_features_ = None
        self.n_samples_ = None

    def fit(self, X: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> 'UnsupervisedQuantumAlgorithm':
        """Fit unsupervised algorithm."""
        self._validate_data(X)
        self.n_features_ = X.shape[1]
        self.n_samples_ = X.shape[0]
        return self

    def _validate_data(self, X: np.ndarray) -> None:
        """Validate input data."""
        if len(X.shape) != 2:
            raise ValueError("X must be 2D array")
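
As an editor-added sketch (not package code) of how these base classes are meant to be extended: a supervised subclass calls super().fit(X, y) to get shape validation and the classes_/n_features_/n_classes_ bookkeeping, then adds its own training. The nearest-centroid logic below is invented purely for illustration.

# Hypothetical subclass for illustration -- not part of superquantx.
import numpy as np

class NearestCentroidExample(SupervisedQuantumAlgorithm):
    def fit(self, X: np.ndarray, y: np.ndarray, **kwargs) -> "NearestCentroidExample":
        super().fit(X, y)  # validates X/y and records classes_, n_features_, n_classes_
        self.centroids_ = np.stack([X[y == c].mean(axis=0) for c in self.classes_])
        self.is_fitted = True
        return self

    def predict(self, X: np.ndarray, **kwargs) -> np.ndarray:
        # Assign each sample to the class of the nearest centroid.
        distances = np.linalg.norm(X[:, None, :] - self.centroids_[None, :, :], axis=2)
        return self.classes_[np.argmin(distances, axis=1)]
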
class OptimizationQuantumAlgorithm(BaseQuantumAlgorithm):
    """Base class for quantum optimization algorithms."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.optimal_params_ = None
        self.optimal_value_ = None
        self.optimization_history_ = []

    def optimize(self, objective_function, initial_params: Optional[np.ndarray] = None, **kwargs) -> QuantumResult:
        """Optimize objective function.

        Args:
            objective_function: Function to optimize
            initial_params: Initial parameter values
            **kwargs: Additional optimization parameters

        Returns:
            Optimization result

        """
        start_time = time.time()

        try:
            result = self._run_optimization(objective_function, initial_params, **kwargs)

            return QuantumResult(
                result=result,
                metadata={
                    'converged': True,
                    'n_iterations': len(self.optimization_history_),
                    'final_cost': self.optimal_value_,
                },
                execution_time=time.time() - start_time,
                backend_info=self.get_circuit_info(),
            )

        except Exception as e:
            logger.error(f"Optimization failed: {e}")
            return QuantumResult(
                result=None,
                metadata={'converged': False},
                execution_time=time.time() - start_time,
                backend_info=self.get_circuit_info(),
                error=str(e),
            )

    @abstractmethod
    def _run_optimization(self, objective_function, initial_params, **kwargs):
        """Run the optimization algorithm."""
        pass
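
Finally, an editor-added sketch (not package code) of the single hook an optimization subclass must supply, _run_optimization. A plain NumPy random search keeps the example dependency-free; a real subclass would drive an actual optimizer and the quantum backend instead. The class name, n_iter parameter, and commented backend string are all invented.

# Hypothetical subclass for illustration -- not part of superquantx.
import numpy as np

class RandomSearchOptimizer(OptimizationQuantumAlgorithm):
    """Toy optimizer: perturb the best parameters and keep improvements."""

    def fit(self, X, y=None, **kwargs):
        return self  # unused here; required because fit() is abstract on the base class

    def predict(self, X, **kwargs):
        raise NotImplementedError("pure optimizer; nothing to predict")

    def _run_optimization(self, objective_function, initial_params, n_iter: int = 100, **kwargs):
        rng = np.random.default_rng(self.seed)
        # Fall back to a two-parameter start if no initial point is given (toy choice).
        best_params = np.asarray(initial_params) if initial_params is not None else rng.normal(size=2)
        best_value = float(objective_function(best_params))
        for _ in range(n_iter):
            candidate = best_params + 0.1 * rng.normal(size=best_params.shape)
            value = float(objective_function(candidate))
            self.optimization_history_.append(value)
            if value < best_value:
                best_params, best_value = candidate, value
        self.optimal_params_, self.optimal_value_ = best_params, best_value
        return best_params

# optimize() wraps the run in a QuantumResult with timing, iteration count, and final cost:
# opt = RandomSearchOptimizer(backend="simulator")   # backend string is a guess
# res = opt.optimize(lambda p: float(np.sum(p ** 2)), initial_params=np.array([1.0, -2.0]))
# res.result is the best parameter vector; res.metadata["final_cost"] is the best value found.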