superquantx-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. superquantx/__init__.py +321 -0
  2. superquantx/algorithms/__init__.py +55 -0
  3. superquantx/algorithms/base_algorithm.py +413 -0
  4. superquantx/algorithms/hybrid_classifier.py +628 -0
  5. superquantx/algorithms/qaoa.py +406 -0
  6. superquantx/algorithms/quantum_agents.py +1006 -0
  7. superquantx/algorithms/quantum_kmeans.py +575 -0
  8. superquantx/algorithms/quantum_nn.py +544 -0
  9. superquantx/algorithms/quantum_pca.py +499 -0
  10. superquantx/algorithms/quantum_svm.py +346 -0
  11. superquantx/algorithms/vqe.py +553 -0
  12. superquantx/algorithms.py +863 -0
  13. superquantx/backends/__init__.py +265 -0
  14. superquantx/backends/base_backend.py +321 -0
  15. superquantx/backends/braket_backend.py +420 -0
  16. superquantx/backends/cirq_backend.py +466 -0
  17. superquantx/backends/ocean_backend.py +491 -0
  18. superquantx/backends/pennylane_backend.py +419 -0
  19. superquantx/backends/qiskit_backend.py +451 -0
  20. superquantx/backends/simulator_backend.py +455 -0
  21. superquantx/backends/tket_backend.py +519 -0
  22. superquantx/circuits.py +447 -0
  23. superquantx/cli/__init__.py +28 -0
  24. superquantx/cli/commands.py +528 -0
  25. superquantx/cli/main.py +254 -0
  26. superquantx/client.py +298 -0
  27. superquantx/config.py +326 -0
  28. superquantx/exceptions.py +287 -0
  29. superquantx/gates.py +588 -0
  30. superquantx/logging_config.py +347 -0
  31. superquantx/measurements.py +702 -0
  32. superquantx/ml.py +936 -0
  33. superquantx/noise.py +760 -0
  34. superquantx/utils/__init__.py +83 -0
  35. superquantx/utils/benchmarking.py +523 -0
  36. superquantx/utils/classical_utils.py +575 -0
  37. superquantx/utils/feature_mapping.py +467 -0
  38. superquantx/utils/optimization.py +410 -0
  39. superquantx/utils/quantum_utils.py +456 -0
  40. superquantx/utils/visualization.py +654 -0
  41. superquantx/version.py +33 -0
  42. superquantx-0.1.0.dist-info/METADATA +365 -0
  43. superquantx-0.1.0.dist-info/RECORD +46 -0
  44. superquantx-0.1.0.dist-info/WHEEL +4 -0
  45. superquantx-0.1.0.dist-info/entry_points.txt +2 -0
  46. superquantx-0.1.0.dist-info/licenses/LICENSE +21 -0
superquantx/algorithms/hybrid_classifier.py
@@ -0,0 +1,628 @@
+"""Hybrid Classical-Quantum Classifier implementation.
+
+This module provides hybrid classifiers that combine classical and quantum
+machine learning components for enhanced performance and flexibility.
+"""
+
+import logging
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
+from sklearn.linear_model import LogisticRegression
+from sklearn.metrics import accuracy_score
+from sklearn.model_selection import cross_val_score
+from sklearn.preprocessing import LabelEncoder, StandardScaler
+from sklearn.svm import SVC
+
+from .base_algorithm import SupervisedQuantumAlgorithm
+from .quantum_nn import QuantumNN
+from .quantum_svm import QuantumSVM
+
+
+logger = logging.getLogger(__name__)
+
+class HybridClassifier(SupervisedQuantumAlgorithm):
+    """Hybrid Classical-Quantum Classifier.
+
+    This classifier combines classical and quantum machine learning algorithms
+    to leverage the strengths of both approaches. It can operate in different modes:
+    - Ensemble: Combines predictions from multiple quantum and classical models
+    - Sequential: Uses quantum features as input to classical models
+    - Voting: Majority voting among quantum and classical predictions
+    - Stacking: Uses meta-learner to combine quantum and classical predictions
+
+    Args:
+        backend: Quantum backend for quantum components
+        hybrid_mode: Mode of operation ('ensemble', 'sequential', 'voting', 'stacking')
+        quantum_algorithms: List of quantum algorithms to include
+        classical_algorithms: List of classical algorithms to include
+        quantum_weight: Weight for quantum predictions (0-1)
+        feature_selection: Whether to use quantum feature selection
+        meta_learner: Meta-learning algorithm for stacking mode
+        shots: Number of measurement shots
+        **kwargs: Additional parameters
+
+    Example:
+        >>> hybrid = HybridClassifier(
+        ...     backend='pennylane',
+        ...     hybrid_mode='ensemble',
+        ...     quantum_algorithms=['quantum_svm', 'quantum_nn'],
+        ...     classical_algorithms=['random_forest', 'svm']
+        ... )
+        >>> hybrid.fit(X_train, y_train)
+        >>> predictions = hybrid.predict(X_test)
+
+    """
+
+    def __init__(
+        self,
+        backend: Union[str, Any],
+        hybrid_mode: str = 'ensemble',
+        quantum_algorithms: Optional[List[str]] = None,
+        classical_algorithms: Optional[List[str]] = None,
+        quantum_weight: float = 0.5,
+        feature_selection: bool = False,
+        meta_learner: str = 'logistic_regression',
+        shots: int = 1024,
+        normalize_data: bool = True,
+        **kwargs
+    ) -> None:
+        super().__init__(backend=backend, shots=shots, **kwargs)
+
+        self.hybrid_mode = hybrid_mode
+        self.quantum_algorithms = quantum_algorithms or ['quantum_svm']
+        self.classical_algorithms = classical_algorithms or ['random_forest']
+        self.quantum_weight = quantum_weight
+        self.feature_selection = feature_selection
+        self.meta_learner_name = meta_learner
+        self.normalize_data = normalize_data
+
+        # Initialize models
+        self.quantum_models = {}
+        self.classical_models = {}
+        self.meta_learner = None
+        self.feature_selector = None
+
+        # Data preprocessing
+        self.scaler = StandardScaler() if normalize_data else None
+        self.label_encoder = LabelEncoder()
+
+        # Model performance tracking
+        self.quantum_scores = {}
+        self.classical_scores = {}
+        self.hybrid_score = None
+        self.feature_importance_ = None
+
+        self._initialize_models()
+
+        logger.info(f"Initialized HybridClassifier with mode={hybrid_mode}")
+        logger.info(f"Quantum algorithms: {self.quantum_algorithms}")
+        logger.info(f"Classical algorithms: {self.classical_algorithms}")
+
+    def _initialize_models(self) -> None:
+        """Initialize quantum and classical models."""
+        # Initialize quantum models
+        for algo in self.quantum_algorithms:
+            if algo == 'quantum_svm':
+                self.quantum_models[algo] = QuantumSVM(
+                    backend=self.backend,
+                    shots=self.shots
+                )
+            elif algo == 'quantum_nn':
+                self.quantum_models[algo] = QuantumNN(
+                    backend=self.backend,
+                    shots=self.shots,
+                    task_type='classification'
+                )
+            else:
+                logger.warning(f"Unknown quantum algorithm: {algo}")
+
+        # Initialize classical models
+        for algo in self.classical_algorithms:
+            if algo == 'random_forest':
+                self.classical_models[algo] = RandomForestClassifier(
+                    n_estimators=100, random_state=42
+                )
+            elif algo == 'gradient_boosting':
+                self.classical_models[algo] = GradientBoostingClassifier(
+                    random_state=42
+                )
+            elif algo == 'logistic_regression':
+                self.classical_models[algo] = LogisticRegression(
+                    random_state=42, max_iter=1000
+                )
+            elif algo == 'svm':
+                self.classical_models[algo] = SVC(
+                    probability=True, random_state=42
+                )
+            else:
+                logger.warning(f"Unknown classical algorithm: {algo}")
+
+        # Initialize meta-learner for stacking
+        if self.hybrid_mode == 'stacking':
+            if self.meta_learner_name == 'logistic_regression':
+                self.meta_learner = LogisticRegression(random_state=42)
+            elif self.meta_learner_name == 'random_forest':
+                self.meta_learner = RandomForestClassifier(n_estimators=50, random_state=42)
+            else:
+                self.meta_learner = LogisticRegression(random_state=42)
+
+        # Initialize feature selector
+        if self.feature_selection:
+            from sklearn.feature_selection import SelectKBest, f_classif
+            self.feature_selector = SelectKBest(score_func=f_classif, k='all')
+
+    def _apply_feature_selection(self, X: np.ndarray, y: Optional[np.ndarray] = None) -> np.ndarray:
+        """Apply quantum-inspired feature selection."""
+        if not self.feature_selection or self.feature_selector is None:
+            return X
+
+        if y is not None:
+            # Fit and transform
+            X_selected = self.feature_selector.fit_transform(X, y)
+
+            # Store feature importance
+            if hasattr(self.feature_selector, 'scores_'):
+                self.feature_importance_ = self.feature_selector.scores_
+        else:
+            # Transform only
+            X_selected = self.feature_selector.transform(X)
+
+        logger.info(f"Feature selection: {X.shape[1]} -> {X_selected.shape[1]} features")
+        return X_selected
+
+    def _train_quantum_models(self, X: np.ndarray, y: np.ndarray) -> Dict[str, float]:
+        """Train quantum models and return their scores."""
+        scores = {}
+
+        for name, model in self.quantum_models.items():
+            try:
+                logger.info(f"Training quantum model: {name}")
+                model.fit(X, y)
+
+                # Evaluate model
+                predictions = model.predict(X)
+                score = accuracy_score(y, predictions)
+                scores[name] = score
+
+                logger.info(f"{name} training accuracy: {score:.4f}")
+
+            except Exception as e:
+                logger.error(f"Failed to train quantum model {name}: {e}")
+                scores[name] = 0.0
+
+        return scores
+
+    def _train_classical_models(self, X: np.ndarray, y: np.ndarray) -> Dict[str, float]:
+        """Train classical models and return their scores."""
+        scores = {}
+
+        for name, model in self.classical_models.items():
+            try:
+                logger.info(f"Training classical model: {name}")
+                model.fit(X, y)
+
+                # Evaluate model
+                predictions = model.predict(X)
+                score = accuracy_score(y, predictions)
+                scores[name] = score
+
+                logger.info(f"{name} training accuracy: {score:.4f}")
+
+            except Exception as e:
+                logger.error(f"Failed to train classical model {name}: {e}")
+                scores[name] = 0.0
+
+        return scores
+
+    def _get_base_predictions(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+        """Get predictions from all base models."""
+        quantum_preds = []
+        classical_preds = []
+
+        # Get quantum predictions
+        for name, model in self.quantum_models.items():
+            try:
+                if hasattr(model, 'predict_proba'):
+                    pred_proba = model.predict_proba(X)
+                    quantum_preds.append(pred_proba)
+                else:
+                    pred = model.predict(X)
+                    # Convert to one-hot for consistency
+                    pred_proba = np.zeros((len(pred), self.n_classes_))
+                    pred_proba[np.arange(len(pred)), pred] = 1.0
+                    quantum_preds.append(pred_proba)
+            except Exception as e:
+                logger.error(f"Failed to get predictions from quantum model {name}: {e}")
+                # Add dummy predictions
+                dummy_pred = np.ones((X.shape[0], self.n_classes_)) / self.n_classes_
+                quantum_preds.append(dummy_pred)
+
+        # Get classical predictions
+        for name, model in self.classical_models.items():
+            try:
+                if hasattr(model, 'predict_proba'):
+                    pred_proba = model.predict_proba(X)
+                    classical_preds.append(pred_proba)
+                else:
+                    pred = model.predict(X)
+                    # Convert to one-hot for consistency
+                    pred_proba = np.zeros((len(pred), self.n_classes_))
+                    pred_proba[np.arange(len(pred)), pred] = 1.0
+                    classical_preds.append(pred_proba)
+            except Exception as e:
+                logger.error(f"Failed to get predictions from classical model {name}: {e}")
+                # Add dummy predictions
+                dummy_pred = np.ones((X.shape[0], self.n_classes_)) / self.n_classes_
+                classical_preds.append(dummy_pred)
+
+        quantum_predictions = np.array(quantum_preds) if quantum_preds else np.array([])
+        classical_predictions = np.array(classical_preds) if classical_preds else np.array([])
+
+        return quantum_predictions, classical_predictions
+
+    def _ensemble_predict(self, X: np.ndarray) -> np.ndarray:
+        """Ensemble prediction mode."""
+        quantum_preds, classical_preds = self._get_base_predictions(X)
+
+        # Combine predictions with weighted average
+        combined_pred = np.zeros((X.shape[0], self.n_classes_))
+
+        if len(quantum_preds) > 0:
+            quantum_avg = np.mean(quantum_preds, axis=0)
+            combined_pred += self.quantum_weight * quantum_avg
+
+        if len(classical_preds) > 0:
+            classical_avg = np.mean(classical_preds, axis=0)
+            combined_pred += (1 - self.quantum_weight) * classical_avg
+
+        return np.argmax(combined_pred, axis=1)
+
+    def _voting_predict(self, X: np.ndarray) -> np.ndarray:
+        """Voting prediction mode."""
+        quantum_preds, classical_preds = self._get_base_predictions(X)
+
+        all_predictions = []
+
+        # Get hard predictions
+        if len(quantum_preds) > 0:
+            for pred_proba in quantum_preds:
+                all_predictions.append(np.argmax(pred_proba, axis=1))
+
+        if len(classical_preds) > 0:
+            for pred_proba in classical_preds:
+                all_predictions.append(np.argmax(pred_proba, axis=1))
+
+        if not all_predictions:
+            return np.zeros(X.shape[0], dtype=int)
+
+        # Majority voting
+        all_predictions = np.array(all_predictions).T
+        final_predictions = []
+
+        for sample_preds in all_predictions:
+            unique, counts = np.unique(sample_preds, return_counts=True)
+            final_predictions.append(unique[np.argmax(counts)])
+
+        return np.array(final_predictions)
+
+    def _sequential_predict(self, X: np.ndarray) -> np.ndarray:
+        """Sequential prediction mode (quantum features -> classical models)."""
+        # Use quantum models to extract features
+        quantum_features = []
+
+        for name, model in self.quantum_models.items():
+            try:
+                if hasattr(model, 'transform'):
+                    features = model.transform(X)
+                elif hasattr(model, 'decision_function'):
+                    features = model.decision_function(X)
+                    if len(features.shape) == 1:
+                        features = features.reshape(-1, 1)
+                else:
+                    # Use prediction probabilities as features
+                    features = model.predict_proba(X)
+
+                quantum_features.append(features)
+
+            except Exception as e:
+                logger.error(f"Failed to extract features from {name}: {e}")
+
+        if not quantum_features:
+            logger.warning("No quantum features extracted, using original features")
+            quantum_feature_matrix = X
+        else:
+            quantum_feature_matrix = np.concatenate(quantum_features, axis=1)
+
+        # Use the best classical model for final prediction
+        best_classical = max(self.classical_scores.items(), key=lambda x: x[1])[0]
+        model = self.classical_models[best_classical]
+
+        return model.predict(quantum_feature_matrix)
+
+    def _stacking_predict(self, X: np.ndarray) -> np.ndarray:
+        """Stacking prediction mode."""
+        quantum_preds, classical_preds = self._get_base_predictions(X)
+
+        # Combine all predictions as meta-features
+        meta_features = []
+
+        if len(quantum_preds) > 0:
+            for pred_proba in quantum_preds:
+                meta_features.append(pred_proba)
+
+        if len(classical_preds) > 0:
+            for pred_proba in classical_preds:
+                meta_features.append(pred_proba)
+
+        if not meta_features:
+            return np.zeros(X.shape[0], dtype=int)
+
+        # Concatenate meta-features
+        meta_X = np.concatenate(meta_features, axis=1)
+
+        # Use meta-learner for final prediction
+        return self.meta_learner.predict(meta_X)
+
+    def fit(self, X: np.ndarray, y: np.ndarray, **kwargs) -> 'HybridClassifier':
+        """Train the hybrid classifier.
+
+        Args:
+            X: Training data features
+            y: Training data labels
+            **kwargs: Additional training parameters
+
+        Returns:
+            Self for method chaining
+
+        """
+        logger.info(f"Training HybridClassifier on {X.shape[0]} samples with {X.shape[1]} features")
+
+        # Validate and preprocess data
+        super().fit(X, y, **kwargs)
+
+        # Normalize features
+        if self.normalize_data:
+            X = self.scaler.fit_transform(X)
+
+        # Encode labels
+        y_encoded = self.label_encoder.fit_transform(y)
+
+        # Apply feature selection
+        X_selected = self._apply_feature_selection(X, y_encoded)
+
+        # Train quantum models
+        self.quantum_scores = self._train_quantum_models(X_selected, y_encoded)
+
+        # Train classical models
+        self.classical_scores = self._train_classical_models(X_selected, y_encoded)
+
+        # Train meta-learner for stacking mode
+        if self.hybrid_mode == 'stacking' and self.meta_learner is not None:
+            logger.info("Training meta-learner for stacking")
+
+            # Get base model predictions for meta-training
+            quantum_preds, classical_preds = self._get_base_predictions(X_selected)
+
+            meta_features = []
+            if len(quantum_preds) > 0:
+                for pred_proba in quantum_preds:
+                    meta_features.append(pred_proba)
+            if len(classical_preds) > 0:
+                for pred_proba in classical_preds:
+                    meta_features.append(pred_proba)
+
+            if meta_features:
+                meta_X = np.concatenate(meta_features, axis=1)
+                self.meta_learner.fit(meta_X, y_encoded)
+
+        # Train sequential model if needed
+        if self.hybrid_mode == 'sequential':
+            # Retrain classical models with quantum features
+            quantum_features = []
+
+            for name, model in self.quantum_models.items():
+                try:
+                    if hasattr(model, 'transform'):
+                        features = model.transform(X_selected)
+                    elif hasattr(model, 'decision_function'):
+                        features = model.decision_function(X_selected)
+                        if len(features.shape) == 1:
+                            features = features.reshape(-1, 1)
+                    else:
+                        features = model.predict_proba(X_selected)
+
+                    quantum_features.append(features)
+
+                except Exception as e:
+                    logger.error(f"Failed to extract features from {name}: {e}")
+
+            if quantum_features:
+                quantum_feature_matrix = np.concatenate(quantum_features, axis=1)
+
+                # Retrain classical models with quantum features
+                for name, model in self.classical_models.items():
+                    try:
+                        model.fit(quantum_feature_matrix, y_encoded)
+                    except Exception as e:
+                        logger.error(f"Failed to retrain {name} with quantum features: {e}")
+
+        self.is_fitted = True
+
+        # Compute hybrid performance
+        predictions = self.predict(X)
+        self.hybrid_score = accuracy_score(y, predictions)
+
+        logger.info(f"Hybrid classifier training completed. Accuracy: {self.hybrid_score:.4f}")
+
+        return self
+
+    def predict(self, X: np.ndarray, **kwargs) -> np.ndarray:
+        """Make predictions using the hybrid classifier.
+
+        Args:
+            X: Input data for prediction
+            **kwargs: Additional prediction parameters
+
+        Returns:
+            Predicted labels
+
+        """
+        if not self.is_fitted:
+            raise ValueError("Model must be fitted before making predictions")
+
+        # Normalize features
+        if self.normalize_data:
+            X = self.scaler.transform(X)
+
+        # Apply feature selection
+        X_selected = self._apply_feature_selection(X)
+
+        # Make predictions based on hybrid mode
+        if self.hybrid_mode == 'ensemble':
+            predictions = self._ensemble_predict(X_selected)
+        elif self.hybrid_mode == 'voting':
+            predictions = self._voting_predict(X_selected)
+        elif self.hybrid_mode == 'sequential':
+            predictions = self._sequential_predict(X_selected)
+        elif self.hybrid_mode == 'stacking':
+            predictions = self._stacking_predict(X_selected)
+        else:
+            raise ValueError(f"Unknown hybrid mode: {self.hybrid_mode}")
+
+        # Decode labels
+        return self.label_encoder.inverse_transform(predictions)
+
+    def predict_proba(self, X: np.ndarray, **kwargs) -> np.ndarray:
+        """Predict class probabilities.
+
+        Args:
+            X: Input data for prediction
+            **kwargs: Additional parameters
+
+        Returns:
+            Predicted class probabilities
+
+        """
+        if not self.is_fitted:
+            raise ValueError("Model must be fitted before making predictions")
+
+        # Normalize features
+        if self.normalize_data:
+            X = self.scaler.transform(X)
+
+        # Apply feature selection
+        X_selected = self._apply_feature_selection(X)
+
+        # Get base predictions
+        quantum_preds, classical_preds = self._get_base_predictions(X_selected)
+
+        if self.hybrid_mode == 'ensemble':
+            # Weighted average of probabilities
+            combined_pred = np.zeros((X.shape[0], self.n_classes_))
+
+            if len(quantum_preds) > 0:
+                quantum_avg = np.mean(quantum_preds, axis=0)
+                combined_pred += self.quantum_weight * quantum_avg
+
+            if len(classical_preds) > 0:
+                classical_avg = np.mean(classical_preds, axis=0)
+                combined_pred += (1 - self.quantum_weight) * classical_avg
+
+            return combined_pred
+
+        elif self.hybrid_mode == 'stacking' and self.meta_learner is not None:
+            # Use meta-learner probabilities
+            meta_features = []
+
+            if len(quantum_preds) > 0:
+                for pred_proba in quantum_preds:
+                    meta_features.append(pred_proba)
+            if len(classical_preds) > 0:
+                for pred_proba in classical_preds:
+                    meta_features.append(pred_proba)
+
+            if meta_features:
+                meta_X = np.concatenate(meta_features, axis=1)
+                if hasattr(self.meta_learner, 'predict_proba'):
+                    return self.meta_learner.predict_proba(meta_X)
+
+        # Fallback: convert predictions to probabilities
+        predictions = self.predict(X)
+        pred_encoded = self.label_encoder.transform(predictions)
+        prob_matrix = np.zeros((len(predictions), self.n_classes_))
+        prob_matrix[np.arange(len(predictions)), pred_encoded] = 1.0
+
+        return prob_matrix
+
+    def get_model_performance(self) -> Dict[str, Any]:
+        """Get detailed performance metrics for all models."""
+        performance = {
+            'quantum_scores': self.quantum_scores.copy(),
+            'classical_scores': self.classical_scores.copy(),
+            'hybrid_score': self.hybrid_score,
+            'hybrid_mode': self.hybrid_mode,
+        }
+
+        # Add quantum advantage metrics
+        if self.quantum_scores and self.classical_scores:
+            best_quantum = max(self.quantum_scores.values()) if self.quantum_scores else 0
+            best_classical = max(self.classical_scores.values()) if self.classical_scores else 0
+
+            performance.update({
+                'best_quantum_score': best_quantum,
+                'best_classical_score': best_classical,
+                'quantum_advantage': best_quantum - best_classical,
+                'hybrid_vs_best_quantum': self.hybrid_score - best_quantum if self.hybrid_score else 0,
+                'hybrid_vs_best_classical': self.hybrid_score - best_classical if self.hybrid_score else 0,
+            })
+
+        return performance
+
+    def get_feature_importance(self) -> Optional[np.ndarray]:
+        """Get feature importance from feature selection."""
+        return self.feature_importance_
+
+    def cross_validate(self, X: np.ndarray, y: np.ndarray, cv: int = 5) -> Dict[str, Any]:
+        """Perform cross-validation on the hybrid classifier."""
+        if not self.is_fitted:
+            raise ValueError("Model must be fitted before cross-validation")
+
+        try:
+            scores = cross_val_score(self, X, y, cv=cv, scoring='accuracy')
+
+            return {
+                'cv_scores': scores.tolist(),
+                'cv_mean': np.mean(scores),
+                'cv_std': np.std(scores),
+                'cv_min': np.min(scores),
+                'cv_max': np.max(scores),
+            }
+
+        except Exception as e:
+            logger.error(f"Cross-validation failed: {e}")
+            return {'error': str(e)}
+
+    def get_params(self, deep: bool = True) -> Dict[str, Any]:
+        """Get hybrid classifier parameters."""
+        params = super().get_params(deep)
+        params.update({
+            'hybrid_mode': self.hybrid_mode,
+            'quantum_algorithms': self.quantum_algorithms,
+            'classical_algorithms': self.classical_algorithms,
+            'quantum_weight': self.quantum_weight,
+            'feature_selection': self.feature_selection,
+            'meta_learner': self.meta_learner_name,
+            'normalize_data': self.normalize_data,
+        })
+        return params
+
+    def set_params(self, **params) -> 'HybridClassifier':
+        """Set hybrid classifier parameters."""
+        if self.is_fitted and any(key in params for key in
+                                  ['hybrid_mode', 'quantum_algorithms', 'classical_algorithms']):
+            logger.warning("Changing core parameters requires refitting the model")
+            self.is_fitted = False
+
+        return super().set_params(**params)
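
For orientation, here is a minimal usage sketch for the HybridClassifier defined in this file. It is illustrative only and not part of the released package: the synthetic dataset and train/test split are made up, the 'pennylane' backend string is taken from the class docstring example and assumes the corresponding backend dependencies are installed, and the explicit module import path simply mirrors the file layout listed above (the package __init__ may also re-export the class, but that is not shown here).

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

from superquantx.algorithms.hybrid_classifier import HybridClassifier

# Illustrative synthetic binary classification data (not from the package).
X, y = make_classification(n_samples=200, n_features=4, n_informative=3,
                           n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)

# Weighted ensemble of one quantum and two classical base models.
hybrid = HybridClassifier(
    backend='pennylane',                 # backend string as in the class docstring example
    hybrid_mode='ensemble',              # or 'voting', 'sequential', 'stacking'
    quantum_algorithms=['quantum_svm'],
    classical_algorithms=['random_forest', 'logistic_regression'],
    quantum_weight=0.4,                  # weight applied to the averaged quantum probabilities
    shots=1024,
)
hybrid.fit(X_train, y_train)

labels = hybrid.predict(X_test)          # decoded class labels
probas = hybrid.predict_proba(X_test)    # weighted-average probabilities in ensemble mode
print(hybrid.get_model_performance())    # per-model training scores plus the hybrid score

In ensemble mode the returned probabilities are quantum_weight * mean(quantum probabilities) + (1 - quantum_weight) * mean(classical probabilities), and predict takes the argmax of that combination; stacking instead concatenates all base-model probabilities and feeds them to the meta-learner selected via the meta_learner argument.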