superquantx 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. superquantx/__init__.py +321 -0
  2. superquantx/algorithms/__init__.py +55 -0
  3. superquantx/algorithms/base_algorithm.py +413 -0
  4. superquantx/algorithms/hybrid_classifier.py +628 -0
  5. superquantx/algorithms/qaoa.py +406 -0
  6. superquantx/algorithms/quantum_agents.py +1006 -0
  7. superquantx/algorithms/quantum_kmeans.py +575 -0
  8. superquantx/algorithms/quantum_nn.py +544 -0
  9. superquantx/algorithms/quantum_pca.py +499 -0
  10. superquantx/algorithms/quantum_svm.py +346 -0
  11. superquantx/algorithms/vqe.py +553 -0
  12. superquantx/algorithms.py +863 -0
  13. superquantx/backends/__init__.py +265 -0
  14. superquantx/backends/base_backend.py +321 -0
  15. superquantx/backends/braket_backend.py +420 -0
  16. superquantx/backends/cirq_backend.py +466 -0
  17. superquantx/backends/ocean_backend.py +491 -0
  18. superquantx/backends/pennylane_backend.py +419 -0
  19. superquantx/backends/qiskit_backend.py +451 -0
  20. superquantx/backends/simulator_backend.py +455 -0
  21. superquantx/backends/tket_backend.py +519 -0
  22. superquantx/circuits.py +447 -0
  23. superquantx/cli/__init__.py +28 -0
  24. superquantx/cli/commands.py +528 -0
  25. superquantx/cli/main.py +254 -0
  26. superquantx/client.py +298 -0
  27. superquantx/config.py +326 -0
  28. superquantx/exceptions.py +287 -0
  29. superquantx/gates.py +588 -0
  30. superquantx/logging_config.py +347 -0
  31. superquantx/measurements.py +702 -0
  32. superquantx/ml.py +936 -0
  33. superquantx/noise.py +760 -0
  34. superquantx/utils/__init__.py +83 -0
  35. superquantx/utils/benchmarking.py +523 -0
  36. superquantx/utils/classical_utils.py +575 -0
  37. superquantx/utils/feature_mapping.py +467 -0
  38. superquantx/utils/optimization.py +410 -0
  39. superquantx/utils/quantum_utils.py +456 -0
  40. superquantx/utils/visualization.py +654 -0
  41. superquantx/version.py +33 -0
  42. superquantx-0.1.0.dist-info/METADATA +365 -0
  43. superquantx-0.1.0.dist-info/RECORD +46 -0
  44. superquantx-0.1.0.dist-info/WHEEL +4 -0
  45. superquantx-0.1.0.dist-info/entry_points.txt +2 -0
  46. superquantx-0.1.0.dist-info/licenses/LICENSE +21 -0
superquantx/utils/__init__.py
@@ -0,0 +1,83 @@
+ """SuperQuantX Utilities Module.
+
+ This module provides utility functions and classes for quantum machine learning,
+ including circuit optimization, visualization, benchmarking, and feature mapping.
+ """
+
+ from .benchmarking import (
+     benchmark_algorithm,
+     benchmark_backend,
+     compare_algorithms,
+     performance_metrics,
+ )
+ from .classical_utils import (
+     cross_validation,
+     data_splitting,
+     hyperparameter_search,
+     model_selection,
+ )
+ from .feature_mapping import (
+     QuantumFeatureMap,
+     create_feature_map,
+     pauli_feature_map,
+     zz_feature_map,
+ )
+ from .optimization import (
+     adam_optimizer,
+     gradient_descent,
+     optimize_circuit,
+     optimize_parameters,
+ )
+ from .quantum_utils import (
+     entanglement_measure,
+     fidelity,
+     quantum_mutual_information,
+     trace_distance,
+ )
+ from .visualization import (
+     plot_bloch_sphere,
+     plot_circuit,
+     plot_optimization_history,
+     plot_quantum_state,
+     visualize_results,
+ )
+
+
+ __all__ = [
+     # Optimization
+     "optimize_circuit",
+     "optimize_parameters",
+     "gradient_descent",
+     "adam_optimizer",
+
+     # Visualization
+     "visualize_results",
+     "plot_optimization_history",
+     "plot_circuit",
+     "plot_quantum_state",
+     "plot_bloch_sphere",
+
+     # Benchmarking
+     "benchmark_algorithm",
+     "benchmark_backend",
+     "performance_metrics",
+     "compare_algorithms",
+
+     # Feature mapping
+     "QuantumFeatureMap",
+     "create_feature_map",
+     "pauli_feature_map",
+     "zz_feature_map",
+
+     # Quantum utilities
+     "fidelity",
+     "trace_distance",
+     "quantum_mutual_information",
+     "entanglement_measure",
+
+     # Classical utilities
+     "cross_validation",
+     "hyperparameter_search",
+     "model_selection",
+     "data_splitting",
+ ]
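
This `__init__.py` gives the utilities a flat import surface: every name in `__all__` is importable directly from `superquantx.utils`. For orientation, a minimal sketch with one re-exported name per submodule (imports only; the individual signatures live in the submodules):

    # One name from each utils submodule, all reachable from the package root.
    from superquantx.utils import (
        benchmark_algorithm,   # .benchmarking
        cross_validation,      # .classical_utils
        zz_feature_map,        # .feature_mapping
        adam_optimizer,        # .optimization
        fidelity,              # .quantum_utils
        plot_bloch_sphere,     # .visualization
    )
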
superquantx/utils/benchmarking.py
@@ -0,0 +1,523 @@
+ """Benchmarking utilities for quantum machine learning.
+
+ This module provides functions to benchmark quantum algorithms and backends,
+ compare performance, and generate comprehensive performance reports.
+ """
+
+ import json
+ import time
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import Any, Callable, Dict, List, Optional, Tuple
+
+ import numpy as np
+
+
+ try:
+     import psutil
+     HAS_PSUTIL = True
+ except ImportError:
+     HAS_PSUTIL = False
+
+
+ @dataclass
+ class BenchmarkResult:
+     """Container for benchmark results."""
+
+     algorithm_name: str
+     backend_name: str
+     dataset_name: str
+     execution_time: float
+     memory_usage: Optional[float]
+     accuracy: Optional[float]
+     loss: Optional[float]
+     n_parameters: Optional[int]
+     n_qubits: Optional[int]
+     n_iterations: Optional[int]
+     success: bool
+     error_message: Optional[str]
+     metadata: Dict[str, Any]
+
+
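Every `BenchmarkResult` field is required (the dataclass defines no defaults), so instances are normally produced by the helpers below. For reference, a hand-built instance with made-up values, and its dict form:

    from dataclasses import asdict
    from superquantx.utils.benchmarking import BenchmarkResult

    example = BenchmarkResult(
        algorithm_name="ToyAlgorithm",   # hypothetical name, for illustration only
        backend_name="simulator",
        dataset_name="toy",
        execution_time=0.42,             # seconds
        memory_usage=12.5,               # MB; None when psutil is unavailable
        accuracy=0.9,
        loss=None,
        n_parameters=8,
        n_qubits=4,
        n_iterations=100,
        success=True,
        error_message=None,
        metadata={"n_shots": 1024},
    )
    print(asdict(example)["accuracy"])   # 0.9
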
+ def benchmark_algorithm(
+     algorithm: Any,
+     datasets: List[Tuple[str, Any]],
+     metrics: Optional[List[str]] = None,
+     n_runs: int = 1,
+     verbose: bool = True
+ ) -> List[BenchmarkResult]:
+     """Benchmark quantum algorithm performance across multiple datasets.
+
+     Args:
+         algorithm: Quantum algorithm instance
+         datasets: List of (name, dataset) tuples
+         metrics: List of metrics to compute
+         n_runs: Number of runs for averaging
+         verbose: Whether to print progress
+
+     Returns:
+         List of benchmark results
+
+     """
+     if metrics is None:
+         metrics = ['accuracy', 'execution_time', 'memory_usage']
+
+     results = []
+
+     for dataset_name, dataset in datasets:
+         if verbose:
+             print(f"Benchmarking {algorithm.__class__.__name__} on {dataset_name}...")
+
+         dataset_results = []
+
+         for run in range(n_runs):
+             if verbose and n_runs > 1:
+                 print(f" Run {run + 1}/{n_runs}")
+
+             result = _run_single_benchmark(
+                 algorithm, dataset_name, dataset, metrics
+             )
+             dataset_results.append(result)
+
+         # Average results if multiple runs
+         if n_runs > 1:
+             averaged_result = _average_benchmark_results(dataset_results)
+             results.append(averaged_result)
+         else:
+             results.extend(dataset_results)
+
+     return results
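
A minimal, self-contained usage sketch. The dataset layout (a dict with 'X_train'/'y_train' and optional 'X_test'/'y_test' keys) and the fit/predict protocol are taken from `_run_single_benchmark` below; the toy classifier is a hypothetical stand-in for a real SuperQuantX algorithm.

    import numpy as np
    from superquantx.utils.benchmarking import benchmark_algorithm

    class MajorityClassifier:
        """Toy stand-in for a quantum algorithm: predicts the most common label."""
        def fit(self, X, y):
            values, counts = np.unique(y, return_counts=True)
            self._majority = values[np.argmax(counts)]
        def predict(self, X):
            return np.full(len(X), self._majority)

    rng = np.random.default_rng(0)
    dataset = {
        'X_train': rng.normal(size=(80, 4)), 'y_train': rng.integers(0, 2, 80),
        'X_test':  rng.normal(size=(20, 4)), 'y_test':  rng.integers(0, 2, 20),
    }

    # n_runs > 1 triggers averaging via _average_benchmark_results.
    results = benchmark_algorithm(MajorityClassifier(), [("random_toy", dataset)], n_runs=3)
    for r in results:
        print(r.dataset_name, r.accuracy, f"{r.execution_time:.4f}s", r.success)
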
+
+
+ def benchmark_backend(
+     backends: List[Any],
+     test_circuit: Callable,
+     n_qubits_range: List[int] = [2, 4, 6, 8],
+     n_shots: int = 1024,
+     verbose: bool = True
+ ) -> Dict[str, List[BenchmarkResult]]:
+     """Benchmark different quantum backends.
+
+     Args:
+         backends: List of backend instances
+         test_circuit: Function that creates test circuit
+         n_qubits_range: Range of qubit numbers to test
+         n_shots: Number of shots for each measurement
+         verbose: Whether to print progress
+
+     Returns:
+         Dictionary mapping backend names to benchmark results
+
+     """
+     results = {}
+
+     for backend in backends:
+         backend_name = getattr(backend, 'name', backend.__class__.__name__)
+         if verbose:
+             print(f"Benchmarking backend: {backend_name}")
+
+         backend_results = []
+
+         for n_qubits in n_qubits_range:
+             if verbose:
+                 print(f" Testing {n_qubits} qubits...")
+
+             try:
+                 start_time = time.time()
+                 start_memory = _get_memory_usage()
+
+                 # Create and run circuit
+                 circuit = test_circuit(n_qubits)
+                 result = backend.run(circuit, shots=n_shots)
+
+                 execution_time = time.time() - start_time
+                 memory_usage = _get_memory_usage() - start_memory if start_memory else None
+
+                 benchmark_result = BenchmarkResult(
+                     algorithm_name="test_circuit",
+                     backend_name=backend_name,
+                     dataset_name=f"{n_qubits}_qubits",
+                     execution_time=execution_time,
+                     memory_usage=memory_usage,
+                     accuracy=None,
+                     loss=None,
+                     n_parameters=None,
+                     n_qubits=n_qubits,
+                     n_iterations=None,
+                     success=True,
+                     error_message=None,
+                     metadata={
+                         'n_shots': n_shots,
+                         'result_counts': getattr(result, 'counts', None)
+                     }
+                 )
+
+             except Exception as e:
+                 benchmark_result = BenchmarkResult(
+                     algorithm_name="test_circuit",
+                     backend_name=backend_name,
+                     dataset_name=f"{n_qubits}_qubits",
+                     execution_time=0,
+                     memory_usage=None,
+                     accuracy=None,
+                     loss=None,
+                     n_parameters=None,
+                     n_qubits=n_qubits,
+                     n_iterations=None,
+                     success=False,
+                     error_message=str(e),
+                     metadata={'n_shots': n_shots}
+                 )
+
+             backend_results.append(benchmark_result)
+
+         results[backend_name] = backend_results
+
+     return results
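
The only requirements this function places on a backend are an optional `name` attribute and a `run(circuit, shots=...)` method, and `test_circuit` just needs to accept a qubit count. The sketch below uses a hypothetical stand-in backend and circuit factory; a real run would pass e.g. a SuperQuantX backend instance and a genuine circuit builder.

    import numpy as np
    from superquantx.utils.benchmarking import benchmark_backend

    class FakeBackend:
        """Hypothetical stand-in: returns random counts instead of executing a circuit."""
        name = "fake_simulator"

        def run(self, circuit, shots=1024):
            n_qubits = circuit  # the toy factory below just returns the qubit count
            rng = np.random.default_rng(0)
            counts = {}
            for bits in rng.integers(0, 2, size=(shots, n_qubits)):
                key = "".join(str(b) for b in bits)
                counts[key] = counts.get(key, 0) + 1
            return counts

    def toy_circuit(n_qubits):
        # Placeholder circuit factory; a real benchmark would build a circuit object here.
        return n_qubits

    results = benchmark_backend([FakeBackend()], toy_circuit, n_qubits_range=[2, 4], n_shots=256)
    for r in results["fake_simulator"]:
        print(r.dataset_name, f"{r.execution_time:.4f}s", r.success)
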
+
+
+ def performance_metrics(
+     y_true: np.ndarray,
+     y_pred: np.ndarray,
+     task_type: str = 'classification'
+ ) -> Dict[str, float]:
+     """Compute performance metrics for predictions.
+
+     Args:
+         y_true: True labels/values
+         y_pred: Predicted labels/values
+         task_type: Type of task ('classification' or 'regression')
+
+     Returns:
+         Dictionary of computed metrics
+
+     """
+     metrics = {}
+
+     if task_type == 'classification':
+         # Accuracy
+         metrics['accuracy'] = np.mean(y_true == y_pred)
+
+         # Precision, Recall, F1 for binary classification
+         if len(np.unique(y_true)) == 2:
+             tp = np.sum((y_true == 1) & (y_pred == 1))
+             fp = np.sum((y_true == 0) & (y_pred == 1))
+             fn = np.sum((y_true == 1) & (y_pred == 0))
+
+             precision = tp / (tp + fp) if (tp + fp) > 0 else 0
+             recall = tp / (tp + fn) if (tp + fn) > 0 else 0
+             f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
+
+             metrics['precision'] = precision
+             metrics['recall'] = recall
+             metrics['f1_score'] = f1
+
+         # Confusion matrix elements
+         unique_labels = np.unique(y_true)
+         confusion_matrix = np.zeros((len(unique_labels), len(unique_labels)))
+
+         for i, true_label in enumerate(unique_labels):
+             for j, pred_label in enumerate(unique_labels):
+                 confusion_matrix[i, j] = np.sum((y_true == true_label) & (y_pred == pred_label))
+
+         metrics['confusion_matrix'] = confusion_matrix.tolist()
+
+     elif task_type == 'regression':
+         # Mean Squared Error
+         mse = np.mean((y_true - y_pred) ** 2)
+         metrics['mse'] = mse
+         metrics['rmse'] = np.sqrt(mse)
+
+         # Mean Absolute Error
+         metrics['mae'] = np.mean(np.abs(y_true - y_pred))
+
+         # R-squared
+         ss_res = np.sum((y_true - y_pred) ** 2)
+         ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)
+         r_squared = 1 - (ss_res / ss_tot) if ss_tot > 0 else 0
+         metrics['r_squared'] = r_squared
+
+         # Explained variance
+         metrics['explained_variance'] = 1 - np.var(y_true - y_pred) / np.var(y_true)
+
+     return metrics
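
A quick sketch of both modes of `performance_metrics`, using made-up arrays:

    import numpy as np
    from superquantx.utils.benchmarking import performance_metrics

    # Classification: accuracy, plus precision/recall/F1 and a confusion matrix for binary labels.
    y_true = np.array([0, 1, 1, 0, 1, 1])
    y_pred = np.array([0, 1, 0, 0, 1, 1])
    clf = performance_metrics(y_true, y_pred, task_type='classification')
    print(clf['accuracy'], clf['precision'], clf['recall'], clf['f1_score'])

    # Regression: MSE/RMSE/MAE, R-squared, and explained variance.
    y_true_r = np.array([1.0, 2.0, 3.0, 4.0])
    y_pred_r = np.array([1.1, 1.9, 3.2, 3.8])
    reg = performance_metrics(y_true_r, y_pred_r, task_type='regression')
    print(reg['rmse'], reg['r_squared'])
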
+
+
+ def compare_algorithms(
+     algorithms: List[Any],
+     dataset: Any,
+     metrics: List[str] = ['accuracy', 'execution_time'],
+     n_runs: int = 3,
+     verbose: bool = True
+ ) -> Dict[str, Any]:
+     """Compare multiple algorithms on the same dataset.
+
+     Args:
+         algorithms: List of algorithm instances
+         dataset: Dataset to use for comparison
+         metrics: Metrics to compare
+         n_runs: Number of runs for averaging
+         verbose: Whether to print progress
+
+     Returns:
+         Comparison results dictionary
+
+     """
+     comparison_results = {
+         'algorithms': [],
+         'metrics': metrics,
+         'n_runs': n_runs,
+         'results': {}
+     }
+
+     for algorithm in algorithms:
+         algorithm_name = algorithm.__class__.__name__
+         comparison_results['algorithms'].append(algorithm_name)
+
+         if verbose:
+             print(f"Running {algorithm_name}...")
+
+         # Run benchmark
+         benchmark_results = benchmark_algorithm(
+             algorithm,
+             [('comparison_dataset', dataset)],
+             metrics=metrics,
+             n_runs=n_runs,
+             verbose=False
+         )
+
+         # Extract averaged metrics
+         result = benchmark_results[0]
+         comparison_results['results'][algorithm_name] = {
+             'execution_time': result.execution_time,
+             'accuracy': result.accuracy,
+             'memory_usage': result.memory_usage,
+             'success': result.success,
+             'error_message': result.error_message
+         }
+
+     # Find best performing algorithm for each metric
+     comparison_results['best_algorithm'] = {}
+     for metric in metrics:
+         if metric == 'execution_time' or metric == 'memory_usage':
+             # Lower is better
+             best_value = float('inf')
+             best_algorithm = None
+             for alg_name, results in comparison_results['results'].items():
+                 if results.get(metric) and results[metric] < best_value:
+                     best_value = results[metric]
+                     best_algorithm = alg_name
+         else:
+             # Higher is better
+             best_value = float('-inf')
+             best_algorithm = None
+             for alg_name, results in comparison_results['results'].items():
+                 if results.get(metric) and results[metric] > best_value:
+                     best_value = results[metric]
+                     best_algorithm = alg_name
+
+         comparison_results['best_algorithm'][metric] = best_algorithm
+
+     return comparison_results
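
A comparison sketch with two hypothetical fit/predict stand-ins (the dataset dict follows the same 'X_train'/'y_train' layout used by `benchmark_algorithm`):

    import numpy as np
    from superquantx.utils.benchmarking import compare_algorithms

    class AlwaysZero:
        def fit(self, X, y): pass
        def predict(self, X): return np.zeros(len(X), dtype=int)

    class AlwaysOne:
        def fit(self, X, y): pass
        def predict(self, X): return np.ones(len(X), dtype=int)

    rng = np.random.default_rng(1)
    dataset = {'X_train': rng.normal(size=(50, 3)), 'y_train': rng.integers(0, 2, 50)}

    comparison = compare_algorithms([AlwaysZero(), AlwaysOne()], dataset, n_runs=2)
    print(comparison['results'])          # per-algorithm averaged metrics
    print(comparison['best_algorithm'])   # winner per metric, e.g. {'accuracy': ..., 'execution_time': ...}
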
+
+
+ def generate_benchmark_report(
+     results: List[BenchmarkResult],
+     output_path: Optional[str] = None
+ ) -> Dict[str, Any]:
+     """Generate comprehensive benchmark report.
+
+     Args:
+         results: List of benchmark results
+         output_path: Optional path to save report
+
+     Returns:
+         Report dictionary
+
+     """
+     report = {
+         'summary': {
+             'total_benchmarks': len(results),
+             'successful_runs': sum(1 for r in results if r.success),
+             'failed_runs': sum(1 for r in results if not r.success),
+             'algorithms_tested': list(set(r.algorithm_name for r in results)),
+             'backends_tested': list(set(r.backend_name for r in results)),
+             'datasets_tested': list(set(r.dataset_name for r in results))
+         },
+         'performance_analysis': {},
+         'detailed_results': []
+     }
+
+     # Performance analysis
+     successful_results = [r for r in results if r.success]
+
+     if successful_results:
+         execution_times = [r.execution_time for r in successful_results]
+         accuracies = [r.accuracy for r in successful_results if r.accuracy is not None]
+         memory_usage = [r.memory_usage for r in successful_results if r.memory_usage is not None]
+
+         report['performance_analysis'] = {
+             'execution_time': {
+                 'mean': np.mean(execution_times),
+                 'std': np.std(execution_times),
+                 'min': np.min(execution_times),
+                 'max': np.max(execution_times)
+             },
+             'accuracy': {
+                 'mean': np.mean(accuracies) if accuracies else None,
+                 'std': np.std(accuracies) if accuracies else None,
+                 'min': np.min(accuracies) if accuracies else None,
+                 'max': np.max(accuracies) if accuracies else None
+             },
+             'memory_usage': {
+                 'mean': np.mean(memory_usage) if memory_usage else None,
+                 'std': np.std(memory_usage) if memory_usage else None,
+                 'min': np.min(memory_usage) if memory_usage else None,
+                 'max': np.max(memory_usage) if memory_usage else None
+             }
+         }
+
+     # Detailed results
+     for result in results:
+         result_dict = {
+             'algorithm': result.algorithm_name,
+             'backend': result.backend_name,
+             'dataset': result.dataset_name,
+             'execution_time': result.execution_time,
+             'memory_usage': result.memory_usage,
+             'accuracy': result.accuracy,
+             'loss': result.loss,
+             'n_parameters': result.n_parameters,
+             'n_qubits': result.n_qubits,
+             'n_iterations': result.n_iterations,
+             'success': result.success,
+             'error_message': result.error_message,
+             'metadata': result.metadata
+         }
+         report['detailed_results'].append(result_dict)
+
+     # Save report if path provided
+     if output_path:
+         Path(output_path).parent.mkdir(parents=True, exist_ok=True)
+         with open(output_path, 'w') as f:
+             json.dump(report, f, indent=2, default=str)
+
+     return report
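
Note that `generate_benchmark_report` is not re-exported from `superquantx.utils`, so it is imported from the module directly. A sketch that feeds it the list returned by `benchmark_algorithm` (the toy classifier, dataset, and output path are illustrative; `default=str` in the JSON dump handles NumPy scalar values):

    import numpy as np
    from superquantx.utils.benchmarking import benchmark_algorithm, generate_benchmark_report

    class AlwaysZero:
        def fit(self, X, y): pass
        def predict(self, X): return np.zeros(len(X), dtype=int)

    rng = np.random.default_rng(2)
    dataset = {'X_train': rng.normal(size=(40, 2)), 'y_train': rng.integers(0, 2, 40)}

    results = benchmark_algorithm(AlwaysZero(), [("toy", dataset)], verbose=False)
    report = generate_benchmark_report(results, output_path="reports/benchmark.json")
    print(report['summary']['successful_runs'],
          report['performance_analysis']['execution_time']['mean'])
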
+
+
+ def _run_single_benchmark(
+     algorithm: Any,
+     dataset_name: str,
+     dataset: Any,
+     metrics: List[str]
+ ) -> BenchmarkResult:
+     """Run single benchmark iteration."""
+     try:
+         start_time = time.time()
+         start_memory = _get_memory_usage()
+
+         # Fit algorithm
+         if hasattr(algorithm, 'fit'):
+             algorithm.fit(dataset['X_train'], dataset['y_train'])
+
+         # Make predictions
+         if hasattr(algorithm, 'predict'):
+             y_pred = algorithm.predict(dataset.get('X_test', dataset['X_train']))
+             y_true = dataset.get('y_test', dataset['y_train'])
+         else:
+             y_pred = None
+             y_true = None
+
+         execution_time = time.time() - start_time
+         memory_usage = _get_memory_usage() - start_memory if start_memory else None
+
+         # Compute metrics
+         accuracy = None
+         loss = None
+
+         if y_pred is not None and y_true is not None:
+             if 'accuracy' in metrics:
+                 accuracy = np.mean(y_true == y_pred)
+
+             if 'loss' in metrics and hasattr(algorithm, 'loss'):
+                 loss = algorithm.loss(y_true, y_pred)
+
+         # Extract algorithm info
+         n_parameters = getattr(algorithm, 'n_parameters', None)
+         n_qubits = getattr(algorithm, 'n_qubits', None)
+         n_iterations = getattr(algorithm, 'n_iterations', None)
+
+         result = BenchmarkResult(
+             algorithm_name=algorithm.__class__.__name__,
+             backend_name=getattr(algorithm, 'backend', {}).get('name', 'unknown'),
+             dataset_name=dataset_name,
+             execution_time=execution_time,
+             memory_usage=memory_usage,
+             accuracy=accuracy,
+             loss=loss,
+             n_parameters=n_parameters,
+             n_qubits=n_qubits,
+             n_iterations=n_iterations,
+             success=True,
+             error_message=None,
+             metadata={}
+         )
+
+     except Exception as e:
+         result = BenchmarkResult(
+             algorithm_name=algorithm.__class__.__name__,
+             backend_name=getattr(algorithm, 'backend', {}).get('name', 'unknown'),
+             dataset_name=dataset_name,
+             execution_time=0,
+             memory_usage=None,
+             accuracy=None,
+             loss=None,
+             n_parameters=None,
+             n_qubits=None,
+             n_iterations=None,
+             success=False,
+             error_message=str(e),
+             metadata={}
+         )
+
+     return result
+
+
+ def _average_benchmark_results(results: List[BenchmarkResult]) -> BenchmarkResult:
+     """Average multiple benchmark results."""
+     # Take the first result as template
+     template = results[0]
+
+     # Average numerical values
+     execution_times = [r.execution_time for r in results if r.success]
+     accuracies = [r.accuracy for r in results if r.success and r.accuracy is not None]
+     losses = [r.loss for r in results if r.success and r.loss is not None]
+     memory_usages = [r.memory_usage for r in results if r.success and r.memory_usage is not None]
+
+     return BenchmarkResult(
+         algorithm_name=template.algorithm_name,
+         backend_name=template.backend_name,
+         dataset_name=template.dataset_name,
+         execution_time=np.mean(execution_times) if execution_times else 0,
+         memory_usage=np.mean(memory_usages) if memory_usages else None,
+         accuracy=np.mean(accuracies) if accuracies else None,
+         loss=np.mean(losses) if losses else None,
+         n_parameters=template.n_parameters,
+         n_qubits=template.n_qubits,
+         n_iterations=template.n_iterations,
+         success=all(r.success for r in results),
+         error_message=None if all(r.success for r in results) else "Some runs failed",
+         metadata={'n_runs': len(results)}
+     )
+
+
+ def _get_memory_usage() -> Optional[float]:
+     """Get current memory usage in MB."""
+     if not HAS_PSUTIL:
+         return None
+
+     try:
+         process = psutil.Process()
+         return process.memory_info().rss / 1024 / 1024  # Convert to MB
+     except Exception:
+         return None