superquantx-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- superquantx/__init__.py +321 -0
- superquantx/algorithms/__init__.py +55 -0
- superquantx/algorithms/base_algorithm.py +413 -0
- superquantx/algorithms/hybrid_classifier.py +628 -0
- superquantx/algorithms/qaoa.py +406 -0
- superquantx/algorithms/quantum_agents.py +1006 -0
- superquantx/algorithms/quantum_kmeans.py +575 -0
- superquantx/algorithms/quantum_nn.py +544 -0
- superquantx/algorithms/quantum_pca.py +499 -0
- superquantx/algorithms/quantum_svm.py +346 -0
- superquantx/algorithms/vqe.py +553 -0
- superquantx/algorithms.py +863 -0
- superquantx/backends/__init__.py +265 -0
- superquantx/backends/base_backend.py +321 -0
- superquantx/backends/braket_backend.py +420 -0
- superquantx/backends/cirq_backend.py +466 -0
- superquantx/backends/ocean_backend.py +491 -0
- superquantx/backends/pennylane_backend.py +419 -0
- superquantx/backends/qiskit_backend.py +451 -0
- superquantx/backends/simulator_backend.py +455 -0
- superquantx/backends/tket_backend.py +519 -0
- superquantx/circuits.py +447 -0
- superquantx/cli/__init__.py +28 -0
- superquantx/cli/commands.py +528 -0
- superquantx/cli/main.py +254 -0
- superquantx/client.py +298 -0
- superquantx/config.py +326 -0
- superquantx/exceptions.py +287 -0
- superquantx/gates.py +588 -0
- superquantx/logging_config.py +347 -0
- superquantx/measurements.py +702 -0
- superquantx/ml.py +936 -0
- superquantx/noise.py +760 -0
- superquantx/utils/__init__.py +83 -0
- superquantx/utils/benchmarking.py +523 -0
- superquantx/utils/classical_utils.py +575 -0
- superquantx/utils/feature_mapping.py +467 -0
- superquantx/utils/optimization.py +410 -0
- superquantx/utils/quantum_utils.py +456 -0
- superquantx/utils/visualization.py +654 -0
- superquantx/version.py +33 -0
- superquantx-0.1.0.dist-info/METADATA +365 -0
- superquantx-0.1.0.dist-info/RECORD +46 -0
- superquantx-0.1.0.dist-info/WHEEL +4 -0
- superquantx-0.1.0.dist-info/entry_points.txt +2 -0
- superquantx-0.1.0.dist-info/licenses/LICENSE +21 -0
superquantx/algorithms/quantum_nn.py
@@ -0,0 +1,544 @@
"""Quantum Neural Network implementation.

This module provides quantum neural network architectures for machine learning
tasks using parameterized quantum circuits as trainable layers.
"""

import logging
from typing import Any, Dict, List, Union

import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder, StandardScaler

from .base_algorithm import SupervisedQuantumAlgorithm


logger = logging.getLogger(__name__)

class QuantumNN(SupervisedQuantumAlgorithm):
    """Quantum Neural Network for classification and regression.

    This implementation uses parameterized quantum circuits as neural network
    layers, with classical optimization to train the quantum parameters.

    The network can be configured with different architectures:
    - Pure quantum: Only quantum layers
    - Hybrid: Combination of quantum and classical layers
    - Variational: Variational quantum circuits with measurement

    Args:
        backend: Quantum backend for circuit execution
        n_layers: Number of quantum layers
        architecture: Network architecture ('pure', 'hybrid', 'variational')
        encoding: Data encoding method ('amplitude', 'angle', 'basis')
        entanglement: Entanglement pattern ('linear', 'circular', 'full')
        measurement: Measurement strategy ('expectation', 'sampling', 'statevector')
        optimizer: Classical optimizer for training ('adam', 'sgd')
        learning_rate: Learning rate for training
        batch_size: Training batch size
        max_epochs: Maximum number of training epochs
        shots: Number of measurement shots
        task_type: Learning task ('classification' or 'regression')
        **kwargs: Additional parameters

    Example:
        >>> qnn = QuantumNN(backend='pennylane', n_layers=3, architecture='hybrid')
        >>> qnn.fit(X_train, y_train)
        >>> predictions = qnn.predict(X_test)
        >>> accuracy = qnn.score(X_test, y_test)

    """

    def __init__(
        self,
        backend: Union[str, Any],
        n_layers: int = 3,
        architecture: str = 'hybrid',
        encoding: str = 'angle',
        entanglement: str = 'linear',
        measurement: str = 'expectation',
        optimizer: str = 'adam',
        learning_rate: float = 0.01,
        batch_size: int = 32,
        max_epochs: int = 100,
        shots: int = 1024,
        task_type: str = 'classification',
        **kwargs
    ) -> None:
        super().__init__(backend=backend, shots=shots, **kwargs)

        self.n_layers = n_layers
        self.architecture = architecture
        self.encoding = encoding
        self.entanglement = entanglement
        self.measurement = measurement
        self.optimizer_name = optimizer
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.max_epochs = max_epochs
        self.task_type = task_type

        # Network components
        self.quantum_layers = []
        self.classical_layers = []
        self.n_qubits = None
        self.n_params = None

        # Training components
        self.weights = None
        self.encoder = LabelEncoder() if task_type == 'classification' else None
        self.scaler = StandardScaler()
        self.optimizer = None

        # Training history
        self.loss_history = []
        self.accuracy_history = []

        logger.info(f"Initialized QuantumNN with {n_layers} layers, architecture={architecture}")

    def _determine_qubits(self, n_features: int) -> int:
        """Determine number of qubits needed for encoding."""
        if self.encoding == 'amplitude':
            return max(1, int(np.ceil(np.log2(n_features))))
        elif self.encoding == 'angle':
            return n_features
        elif self.encoding == 'basis':
            return int(np.ceil(np.log2(n_features)))
        else:
            return n_features

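    # Illustrative note (added; not in the packaged source): with 8 input
    # features, 'amplitude' encoding packs the feature vector into state
    # amplitudes and needs ceil(log2(8)) = 3 qubits, while 'angle' encoding
    # uses one rotation per feature and needs all 8 qubits.
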
    def _create_encoding_layer(self, x: np.ndarray) -> Any:
        """Create data encoding quantum layer."""
        try:
            if hasattr(self.backend, 'create_encoding_layer'):
                return self.backend.create_encoding_layer(
                    data=x,
                    encoding=self.encoding,
                    n_qubits=self.n_qubits
                )
            else:
                return self._fallback_encoding(x)
        except Exception as e:
            logger.error(f"Failed to create encoding layer: {e}")
            return self._fallback_encoding(x)

    def _fallback_encoding(self, x: np.ndarray) -> Any:
        """Fallback data encoding implementation."""
        logger.warning("Using fallback data encoding")
        return None

    def _create_variational_layer(self, params: np.ndarray, layer_idx: int) -> Any:
        """Create parameterized variational quantum layer."""
        try:
            if hasattr(self.backend, 'create_variational_layer'):
                return self.backend.create_variational_layer(
                    params=params,
                    layer_idx=layer_idx,
                    entanglement=self.entanglement,
                    n_qubits=self.n_qubits
                )
            else:
                return self._fallback_variational_layer(params, layer_idx)
        except Exception as e:
            logger.error(f"Failed to create variational layer {layer_idx}: {e}")
            return self._fallback_variational_layer(params, layer_idx)

    def _fallback_variational_layer(self, params: np.ndarray, layer_idx: int) -> Any:
        """Fallback variational layer implementation."""
        logger.warning(f"Using fallback variational layer {layer_idx}")
        return None

    def _create_measurement_layer(self) -> Any:
        """Create measurement layer."""
        try:
            if hasattr(self.backend, 'create_measurement_layer'):
                return self.backend.create_measurement_layer(
                    measurement=self.measurement,
                    n_qubits=self.n_qubits
                )
            else:
                return self._fallback_measurement()
        except Exception as e:
            logger.error(f"Failed to create measurement layer: {e}")
            return self._fallback_measurement()

    def _fallback_measurement(self) -> Any:
        """Fallback measurement implementation."""
        logger.warning("Using fallback measurement layer")
        return None

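    # Note (added): the three _create_* methods above probe the backend for
    # optional hooks via hasattr and fall back to the _fallback_* stubs when
    # a hook is missing or raises, so a backend only needs to implement the
    # hooks it actually supports.
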
    def _build_network(self) -> None:
        """Build the complete quantum neural network."""
        logger.info(f"Building {self.architecture} quantum neural network")

        # Calculate number of parameters needed
        params_per_layer = self._get_params_per_layer()
        self.n_params = self.n_layers * params_per_layer

        # Initialize weights
        self.weights = np.random.uniform(-np.pi, np.pi, self.n_params)

        # Build network layers based on architecture
        if self.architecture == 'pure':
            self._build_pure_quantum_network()
        elif self.architecture == 'hybrid':
            self._build_hybrid_network()
        elif self.architecture == 'variational':
            self._build_variational_network()
        else:
            raise ValueError(f"Unknown architecture: {self.architecture}")

    def _get_params_per_layer(self) -> int:
        """Get number of parameters per quantum layer."""
        if hasattr(self.backend, 'get_layer_param_count'):
            return self.backend.get_layer_param_count(
                n_qubits=self.n_qubits,
                entanglement=self.entanglement
            )
        else:
            # Default parameter count estimate
            return 2 * self.n_qubits  # RY and RZ rotations per qubit

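    # Worked example (added): with the default estimate and n_qubits=4, each
    # layer has 2 * 4 = 8 parameters (one RY and one RZ angle per qubit), so
    # n_layers=3 gives n_params = 24 trainable weights overall.
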
    def _build_pure_quantum_network(self) -> None:
        """Build pure quantum network (only quantum layers)."""
        # Use the same per-layer parameter count as _build_network so the
        # parameter slices stay valid when the backend reports a custom count
        # (the original hardcoded 2 * n_qubits here).
        params_per_layer = self._get_params_per_layer()
        self.quantum_layers = []
        for i in range(self.n_layers):
            layer = {
                'type': 'variational',
                'params': slice(i * params_per_layer, (i + 1) * params_per_layer),
                'layer_idx': i
            }
            self.quantum_layers.append(layer)

    def _build_hybrid_network(self) -> None:
        """Build hybrid quantum-classical network."""
        self._build_pure_quantum_network()

        # Always create classical layers for hybrid networks to handle the
        # output size. n_classes_ is only set during classification fits, so
        # fall back to a single output unit for regression.
        n_units = getattr(self, 'n_classes_', None) or 1
        self.classical_layers = [
            {'type': 'dense', 'units': n_units, 'activation': 'softmax' if self.task_type == 'classification' else 'linear'}
        ]

    def _build_variational_network(self) -> None:
        """Build variational quantum circuit network."""
        self._build_pure_quantum_network()

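    # Note (added): the classical head maps the quantum readout to the task
    # output: a dense softmax layer sized to the number of classes, or a
    # single linear unit for regression.
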
    def _forward_pass(self, x: np.ndarray, weights: np.ndarray) -> np.ndarray:
        """Perform forward pass through the quantum neural network.

        Args:
            x: Input data
            weights: Network weights

        Returns:
            Network output

        """
        try:
            if hasattr(self.backend, 'execute_qnn'):
                result = self.backend.execute_qnn(
                    input_data=x,
                    weights=weights,
                    quantum_layers=self.quantum_layers,
                    classical_layers=self.classical_layers,
                    encoding=self.encoding,
                    measurement=self.measurement,
                    shots=self.shots
                )
                return result
            else:
                return self._fallback_forward_pass(x, weights)
        except Exception as e:
            logger.error(f"Forward pass failed: {e}")
            return self._fallback_forward_pass(x, weights)

    def _fallback_forward_pass(self, x: np.ndarray, weights: np.ndarray) -> np.ndarray:
        """Fallback forward pass implementation."""
        logger.warning("Using fallback forward pass")
        batch_size = x.shape[0]
        output_size = self.n_classes_ if self.task_type == 'classification' else 1
        return np.random.random((batch_size, output_size))

    def _compute_loss(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:
        """Compute loss function."""
        if self.task_type == 'classification':
            # Cross-entropy loss
            y_pred = np.clip(y_pred, 1e-15, 1 - 1e-15)  # Avoid log(0)
            if len(y_true.shape) == 1:
                # Convert to one-hot if needed
                y_true_oh = np.zeros((len(y_true), self.n_classes_))
                y_true_oh[np.arange(len(y_true)), y_true.astype(int)] = 1
                y_true = y_true_oh
            return -np.mean(np.sum(y_true * np.log(y_pred), axis=1))
        else:
            # Mean squared error for regression
            return np.mean((y_true - y_pred.flatten()) ** 2)

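    # Worked example (added): y_true = [0, 2] with n_classes_ = 3 is expanded
    # to [[1, 0, 0], [0, 0, 1]], and the loss reduces to the mean over the
    # batch of -log(predicted probability of the true class).
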
    def _compute_gradients(self, x: np.ndarray, y_true: np.ndarray, weights: np.ndarray) -> np.ndarray:
        """Compute gradients using parameter-shift rule."""
        gradients = np.zeros_like(weights)
        shift = np.pi / 2

        for i in range(len(weights)):
            # Forward shift
            weights_plus = weights.copy()
            weights_plus[i] += shift
            y_pred_plus = self._forward_pass(x, weights_plus)
            loss_plus = self._compute_loss(y_true, y_pred_plus)

            # Backward shift
            weights_minus = weights.copy()
            weights_minus[i] -= shift
            y_pred_minus = self._forward_pass(x, weights_minus)
            loss_minus = self._compute_loss(y_true, y_pred_minus)

            # Gradient via parameter-shift rule
            gradients[i] = 0.5 * (loss_plus - loss_minus)

        return gradients

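    # Background (added): for a gate generated by a Pauli rotation, the exact
    # parameter-shift identity for an expectation value <H> is
    #     d<H>/dtheta = (<H>(theta + pi/2) - <H>(theta - pi/2)) / 2.
    # Applying the same +/- pi/2 shift directly to a nonlinear loss, as the
    # loop above does, is a common heuristic rather than an exact gradient.
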
    def _update_weights(self, gradients: np.ndarray) -> None:
        """Update weights using optimizer."""
        if self.optimizer_name == 'sgd':
            self.weights -= self.learning_rate * gradients
        elif self.optimizer_name == 'adam':
            # Simplified Adam optimizer
            if not hasattr(self, 'adam_m'):
                self.adam_m = np.zeros_like(self.weights)
                self.adam_v = np.zeros_like(self.weights)
                self.adam_t = 0

            self.adam_t += 1
            beta1, beta2 = 0.9, 0.999

            self.adam_m = beta1 * self.adam_m + (1 - beta1) * gradients
            self.adam_v = beta2 * self.adam_v + (1 - beta2) * gradients**2

            m_hat = self.adam_m / (1 - beta1**self.adam_t)
            v_hat = self.adam_v / (1 - beta2**self.adam_t)

            self.weights -= self.learning_rate * m_hat / (np.sqrt(v_hat) + 1e-8)
        else:
            # Default: simple gradient descent
            self.weights -= self.learning_rate * gradients

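    # Background (added): the branch above is the standard Adam update
    # (Kingma & Ba, 2015):
    #     m_t = b1*m + (1-b1)*g,   v_t = b2*v + (1-b2)*g^2,
    #     w  -= lr * m_hat / (sqrt(v_hat) + eps),
    # where m_hat and v_hat divide out the (1 - b^t) initialization bias.
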
    def fit(self, X: np.ndarray, y: np.ndarray, **kwargs) -> 'QuantumNN':
        """Train the quantum neural network.

        Args:
            X: Training data features
            y: Training data labels
            **kwargs: Additional training parameters

        Returns:
            Self for method chaining

        """
        logger.info(f"Training QuantumNN on {X.shape[0]} samples with {X.shape[1]} features")

        # Validate and preprocess data
        super().fit(X, y, **kwargs)

        # Set number of classes for classification BEFORE building network
        if self.task_type == 'classification':
            unique_classes = np.unique(y)
            self.n_classes_ = len(unique_classes)
            logger.info(f"Detected {self.n_classes_} classes for classification: {unique_classes}")

        # Scale features
        X = self.scaler.fit_transform(X)

        # Encode labels for classification after setting n_classes_
        if self.task_type == 'classification' and self.encoder:
            y = self.encoder.fit_transform(y)

        # Determine network architecture
        self.n_qubits = self._determine_qubits(X.shape[1])
        self._build_network()

        # Reset training history
        self.loss_history = []
        self.accuracy_history = []

        logger.info(f"Training network with {self.n_qubits} qubits and {self.n_params} parameters")

        # Training loop
        for epoch in range(self.max_epochs):
            epoch_losses = []
            epoch_accuracies = []

            # Mini-batch training
            for i in range(0, len(X), self.batch_size):
                X_batch = X[i:i + self.batch_size]
                y_batch = y[i:i + self.batch_size]

                # Forward pass
                y_pred = self._forward_pass(X_batch, self.weights)

                # Compute loss
                loss = self._compute_loss(y_batch, y_pred)
                epoch_losses.append(loss)

                # Compute accuracy for classification
                if self.task_type == 'classification':
                    y_pred_labels = np.argmax(y_pred, axis=1)
                    accuracy = accuracy_score(y_batch, y_pred_labels)
                    epoch_accuracies.append(accuracy)

                # Compute gradients and update weights
                gradients = self._compute_gradients(X_batch, y_batch, self.weights)
                self._update_weights(gradients)

            # Record epoch statistics
            epoch_loss = np.mean(epoch_losses)
            self.loss_history.append(epoch_loss)

            if self.task_type == 'classification' and epoch_accuracies:
                epoch_accuracy = np.mean(epoch_accuracies)
                self.accuracy_history.append(epoch_accuracy)

                if epoch % 10 == 0:
                    logger.info(f"Epoch {epoch}: Loss = {epoch_loss:.4f}, Accuracy = {epoch_accuracy:.4f}")
            else:
                if epoch % 10 == 0:
                    logger.info(f"Epoch {epoch}: Loss = {epoch_loss:.4f}")

            # Early stopping check
            if len(self.loss_history) > 10 and self._check_early_stopping():
                logger.info(f"Early stopping at epoch {epoch}")
                break

        self.is_fitted = True

        # Final training statistics
        final_loss = self.loss_history[-1]
        logger.info(f"Training completed. Final loss: {final_loss:.4f}")

        return self

    def _check_early_stopping(self, patience: int = 10, min_delta: float = 1e-4) -> bool:
        """Check if training should stop early."""
        if len(self.loss_history) < patience + 1:
            return False

        recent_losses = self.loss_history[-patience-1:]
        best_loss = min(recent_losses[:-1])
        current_loss = recent_losses[-1]

        return (best_loss - current_loss) < min_delta

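    # Example (added): with patience=10 and min_delta=1e-4, training stops as
    # soon as the latest epoch loss fails to undercut the best of the
    # preceding 10 epoch losses by at least 1e-4.
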
    def predict(self, X: np.ndarray, **kwargs) -> np.ndarray:
        """Make predictions using the trained quantum neural network.

        Args:
            X: Input data for prediction
            **kwargs: Additional prediction parameters

        Returns:
            Predicted labels or values

        """
        if not self.is_fitted:
            raise ValueError("Model must be fitted before making predictions")

        # Scale features
        X = self.scaler.transform(X)

        # Forward pass
        y_pred = self._forward_pass(X, self.weights)

        if self.task_type == 'classification':
            # Return class labels
            predictions = np.argmax(y_pred, axis=1)
            if self.encoder:
                predictions = self.encoder.inverse_transform(predictions)
            return predictions
        else:
            # Return continuous values for regression
            return y_pred.flatten()

    def predict_proba(self, X: np.ndarray, **kwargs) -> np.ndarray:
        """Predict class probabilities.

        Args:
            X: Input data for prediction
            **kwargs: Additional parameters

        Returns:
            Predicted class probabilities

        """
        if self.task_type != 'classification':
            raise ValueError("predict_proba only available for classification tasks")

        if not self.is_fitted:
            raise ValueError("Model must be fitted before making predictions")

        # Scale features
        X = self.scaler.transform(X)

        # Forward pass returns probabilities for classification
        return self._forward_pass(X, self.weights)

    def get_circuit_depth(self) -> int:
        """Get the depth of the quantum circuit."""
        if hasattr(self.backend, 'get_circuit_depth'):
            return self.backend.get_circuit_depth(self.quantum_layers)
        else:
            return self.n_layers * 2  # Estimate

    def get_training_history(self) -> Dict[str, List[float]]:
        """Get training history."""
        history = {'loss': self.loss_history}
        if self.accuracy_history:
            history['accuracy'] = self.accuracy_history
        return history

    def analyze_expressivity(self) -> Dict[str, Any]:
        """Analyze the expressivity of the quantum neural network."""
        analysis = {
            'n_qubits': self.n_qubits,
            'n_layers': self.n_layers,
            'n_parameters': self.n_params,
            'circuit_depth': self.get_circuit_depth(),
            'entanglement_pattern': self.entanglement,
            'encoding_method': self.encoding,
        }

        # Estimate expressivity metrics
        analysis.update({
            'parameter_space_dimension': self.n_params,
            'hilbert_space_dimension': 2**self.n_qubits,
            'expressivity_ratio': self.n_params / (2**self.n_qubits),
        })

        return analysis

    def get_params(self, deep: bool = True) -> Dict[str, Any]:
        """Get quantum neural network parameters."""
        params = super().get_params(deep)
        params.update({
            'n_layers': self.n_layers,
            'architecture': self.architecture,
            'encoding': self.encoding,
            'entanglement': self.entanglement,
            'measurement': self.measurement,
            'optimizer': self.optimizer_name,
            'learning_rate': self.learning_rate,
            'batch_size': self.batch_size,
            'max_epochs': self.max_epochs,
            'task_type': self.task_type,
        })
        return params

    def set_params(self, **params) -> 'QuantumNN':
        """Set quantum neural network parameters."""
        if self.is_fitted and any(key in params for key in
                                  ['n_layers', 'architecture', 'encoding', 'entanglement']):
            logger.warning("Changing core parameters requires refitting the model")
            self.is_fitted = False

        return super().set_params(**params)


# Alias for backwards compatibility
QuantumNeuralNetwork = QuantumNN
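
# Minimal usage sketch (added for illustration; not part of the packaged
# module). It assumes the base class accepts a backend string the way the
# docstring's 'pennylane' example does; 'simulator' is a guess based on the
# bundled simulator_backend.py. Without a backend that implements
# execute_qnn, execution falls through to the random fallback forward pass.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split

    X, y = make_classification(n_samples=60, n_features=4, n_classes=2, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    qnn = QuantumNN(backend='simulator', n_layers=2, max_epochs=20)
    qnn.fit(X_train, y_train)
    print(qnn.predict(X_test)[:5])
    print(qnn.analyze_expressivity())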