lazyqml-2.0.5-py2.py3-none-any.whl → lazyqml-3.0.0-py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. lazyqml/Factories/Circuits/AmplitudeEmbedding.py +1 -1
  2. lazyqml/Factories/Circuits/HCzRx.py +1 -1
  3. lazyqml/Factories/Circuits/HardwareEfficient.py +1 -1
  4. lazyqml/Factories/Circuits/RxEmbedding.py +1 -1
  5. lazyqml/Factories/Circuits/RyEmbedding.py +1 -1
  6. lazyqml/Factories/Circuits/RzEmbedding.py +1 -1
  7. lazyqml/Factories/Circuits/TreeTensor.py +1 -1
  8. lazyqml/Factories/Circuits/TwoLocal.py +1 -1
  9. lazyqml/Factories/Circuits/ZzEmbedding.py +1 -1
  10. lazyqml/Factories/Circuits/fCircuits.py +10 -10
  11. lazyqml/Factories/Dispatchers/Dispatcher.py +264 -85
  12. lazyqml/Factories/Models/Hybrid.py +460 -0
  13. lazyqml/Factories/Models/QNNBag.py +6 -6
  14. lazyqml/Factories/Models/QNNTorch.py +8 -8
  15. lazyqml/Factories/Models/QSVM.py +3 -3
  16. lazyqml/Factories/Models/_QNNPennylane.py +4 -4
  17. lazyqml/Factories/Models/fModels.py +4 -4
  18. lazyqml/Factories/Preprocessing/Pca.py +2 -2
  19. lazyqml/Factories/Preprocessing/Sanitizer.py +2 -2
  20. lazyqml/Factories/Preprocessing/fPreprocessing.py +5 -24
  21. lazyqml/Global/globalEnums.py +3 -1
  22. lazyqml/Interfaces/iAnsatz.py +1 -1
  23. lazyqml/Utils/Utils.py +203 -84
  24. lazyqml/Utils/Validator.py +4 -7
  25. lazyqml/__init__.py +1 -1
  26. lazyqml/lazyqml.py +54 -49
  27. lazyqml-3.0.0.dist-info/LICENSE +21 -0
  28. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/METADATA +48 -35
  29. lazyqml-3.0.0.dist-info/RECORD +40 -0
  30. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/WHEEL +1 -1
  31. lazyqml/.lazyqmlP.py +0 -293
  32. lazyqml/.lazyqmlVote.py +0 -303
  33. lazyqml/Factories/Circuits/_Qkernel.py +0 -16
  34. lazyqml/Factories/Circuits/_Qnn.py +0 -17
  35. lazyqml/Factories/Dispatchers/DispatcherCV.py +0 -143
  36. lazyqml/Factories/Dispatchers/DispatcherNumba.py +0 -226
  37. lazyqml/Factories/Dispatchers/_Dispatcher.py +0 -188
  38. lazyqml/Factories/Dispatchers/_DispatcherMultiprocessing.py +0 -201
  39. lazyqml/Factories/Dispatchers/_QNNBagdispatcher.py +0 -2
  40. lazyqml/Factories/Dispatchers/_QNNdispatcher.py +0 -2
  41. lazyqml/Factories/Dispatchers/_QSVMdispatcher.py +0 -112
  42. lazyqml/Factories/Dispatchers/__Dispatcher.py +0 -193
  43. lazyqml/Factories/Preprocessing/_PcaAmp.py +0 -22
  44. lazyqml/Factories/Preprocessing/_PcaTree.py +0 -22
  45. lazyqml/Factories/Preprocessing/_PcaTreeAmp.py +0 -22
  46. lazyqml/Lanza copy.sh +0 -32
  47. lazyqml/Lanza.sh +0 -21
  48. lazyqml/mem.py +0 -85
  49. lazyqml-2.0.5.dist-info/RECORD +0 -56
  50. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/AUTHORS.rst +0 -0
  51. /lazyqml-2.0.5.dist-info/LICENSE → /lazyqml-3.0.0.dist-info/LICENSE copy +0 -0
  52. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/entry_points.txt +0 -0
  53. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/top_level.txt +0 -0
lazyqml/Factories/Models/Hybrid.py
@@ -0,0 +1,460 @@
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ import pennylane as qml
+ from time import time
+
+ import sys
+ sys.path.append('/home/diego/LazyQML/lazyqml/')
+ from Global.globalEnums import *
+ from Factories.Circuits.fCircuits import CircuitFactory
+ from Utils.Utils import printer
+
+
+ # class HybridQuantumClassicalModel(nn.Module):
+ #     def __init__(self,
+ #                  classical_model,
+ #                  nqubits,
+ #                  backend=Backend.lightningQubit,
+ #                  ansatz='default',
+ #                  embedding='default',
+ #                  n_class=2,
+ #                  layers=1,
+ #                  shots=1000,
+ #                  lr=0.01,
+ #                  seed=1234,
+ #                  fine_tune_classical=False):
+ #         super().__init__()
+
+ #         # Classical model configuration
+ #         self.classical_model = classical_model
+ #         self.fine_tune_classical = fine_tune_classical
+
+ #         # Freeze or unfreeze classical model parameters
+ #         for param in self.classical_model.parameters():
+ #             param.requires_grad = fine_tune_classical
+
+ #         # Quantum Neural Network configuration
+ #         self.nqubits = nqubits
+ #         self.backend = backend
+ #         self.ansatz = ansatz
+ #         self.embedding = embedding
+ #         self.n_class = n_class
+ #         self.layers = layers
+ #         self.shots = shots
+ #         self.lr = lr
+
+ #         # Initialize quantum device
+ #         self.deviceQ = qml.device(backend.value, wires=nqubits, seed=seed) if backend != Backend.lightningGPU else qml.device(backend.value, wires=nqubits)
+
+ #         # Circuit factory
+ #         self.circuit_factory = CircuitFactory(nqubits, nlayers=layers)
+
+ #         # Build quantum circuit
+ #         self._build_quantum_circuit()
+
+ #         # Determine classical output dimension
+ #         classical_output_dim = self._get_classical_output_dim()
+
+ #         # Bridge layer
+ #         self.bridge = nn.Linear(classical_output_dim, nqubits)
+
+ #         # Final classification layer
+ #         if n_class == 2:
+ #             self.classifier = nn.Linear(1, 1)
+ #             self.criterion = nn.BCEWithLogitsLoss()
+ #         else:
+ #             self.classifier = nn.Linear(1, n_class)
+ #             self.criterion = nn.CrossEntropyLoss()
+
+ #     def _get_classical_output_dim(self):
+ #         with torch.no_grad():
+ #             dummy_input = torch.zeros((1,) + self.classical_model.input_shape)
+ #             classical_output = self.classical_model(dummy_input)
+ #             return classical_output.numel()
+
+ #     def _build_quantum_circuit(self):
+ #         ansatz = self.circuit_factory.GetAnsatzCircuit(self.ansatz)
+ #         embedding = self.circuit_factory.GetEmbeddingCircuit(self.embedding)
+
+ #         @qml.qnode(self.deviceQ, interface='torch', diff_method='adjoint')
+ #         def quantum_circuit(x, theta):
+ #             embedding.getCircuit()(x, wires=range(self.nqubits))
+ #             ansatz.getCircuit()(theta, wires=range(self.nqubits))
+
+ #             return qml.expval(qml.PauliZ(0))
+
+ #         self.quantum_circuit = quantum_circuit
+
+ #         # Initialize quantum parameters
+ #         self.quantum_params = nn.Parameter(torch.randn(self.layers * ansatz.getParameters()))
+
+ #     def forward(self, x):
+ #         # Extract features from classical model
+ #         classical_features = self.classical_model(x)
+
+ #         # Flatten and ensure 2D tensor
+ #         classical_features = classical_features.view(x.size(0), -1)
+
+ #         # Bridge layer transformation
+ #         bridged_features = self.bridge(classical_features)
+
+ #         # Quantum circuit processing
+ #         qnn_output = torch.stack([
+ #             self.quantum_circuit(feat, self.quantum_params)
+ #             for feat in bridged_features
+ #         ]).unsqueeze(1)
+
+ #         return self.classifier(qnn_output).squeeze()
+
+ #     def predict(self, X):
+ #         with torch.no_grad():
+ #             # Ensure input is converted to a PyTorch tensor
+ #             if not isinstance(X, torch.Tensor):
+ #                 X = torch.tensor(X, dtype=torch.float32)
+
+ #             outputs = self.forward(X)
+ #             return (torch.sigmoid(outputs) > 0.5).float()
+
+
+ #     def fit(self, X, y, batch_size=32, epochs=10, lr=None):
+ #         # Use provided learning rate or default to class initialization
+ #         learning_rate = lr if lr is not None else self.lr
+
+ #         # Prepare data
+ #         X_train = torch.tensor(X, dtype=torch.float32)
+ #         y_train = torch.tensor(y, dtype=torch.long if self.n_class > 2 else torch.float32)
+
+ #         # Create data loader
+ #         data_loader = torch.utils.data.DataLoader(
+ #             list(zip(X_train, y_train)), batch_size=batch_size, shuffle=True
+ #         )
+
+ #         # Collect trainable parameters
+ #         params_to_optimize = []
+ #         if self.fine_tune_classical:
+ #             params_to_optimize.extend(self.classical_model.parameters())
+ #         params_to_optimize.extend(list(self.bridge.parameters()))
+ #         params_to_optimize.append(self.quantum_params)
+ #         params_to_optimize.extend(list(self.classifier.parameters()))
+
+ #         # Optimizer
+ #         optimizer = optim.Adam(params_to_optimize, lr=learning_rate)
+
+ #         # Training loop
+ #         start_time = time()
+ #         for epoch in range(epochs):
+ #             epoch_loss = 0.0
+ #             for batch_X, batch_y in data_loader:
+ #                 optimizer.zero_grad()
+
+ #                 # Forward pass
+ #                 predictions = self.forward(batch_X)
+ #                 loss = self.criterion(predictions, batch_y)
+
+ #                 # Backward pass
+ #                 loss.backward()
+ #                 optimizer.step()
+
+ #                 epoch_loss += loss.item()
+
+ #             # Print epoch progress
+ #             printer.print(f"\t\tEpoch {epoch+1}/{epochs}, Loss: {epoch_loss/len(data_loader):.4f}")
+
+ #         printer.print(f"\t\tTraining completed in {time() - start_time:.2f} seconds")
+
+ class HybridQuantumClassicalModel(nn.Module):
+     def __init__(self,
+                  classical_model,
+                  nqubits,
+                  backend=Backend.lightningQubit,
+                  ansatz='default',
+                  embedding='default',
+                  n_class=2,
+                  layers=1,
+                  shots=1000,
+                  lr=0.01,
+                  seed=1234,
+                  fine_tune_classical=False,
+                  input_shape=None): # Add input_shape argument
+         super().__init__()
+
+         self.classical_model = classical_model
+         self.fine_tune_classical = fine_tune_classical
+
+         # Freeze or unfreeze classical model parameters
+         for param in self.classical_model.parameters():
+             param.requires_grad = fine_tune_classical
+
+         # Quantum Neural Network configuration
+         self.nqubits = nqubits
+         self.backend = backend
+         self.ansatz = ansatz
+         self.embedding = embedding
+         self.n_class = n_class
+         self.layers = layers
+         self.shots = shots
+         self.lr = lr
+
+         # Initialize quantum device
+         self.deviceQ = qml.device(backend.value, wires=nqubits, seed=seed) if backend != Backend.lightningGPU else qml.device(backend.value, wires=nqubits)
+
+         # Circuit factory
+         self.circuit_factory = CircuitFactory(nqubits, nlayers=layers)
+
+         # Build quantum circuit
+         self._build_quantum_circuit()
+
+         # Determine classical output dimension
+         classical_output_dim = self._infer_classical_output_dim(input_shape) # Pass input_shape to inference method
+
+         # Bridge layer
+         self.bridge = nn.Linear(classical_output_dim, nqubits)
+
+         # Final classification layer
+         if n_class == 2:
+             self.classifier = nn.Linear(1, 1)
+             self.criterion = nn.BCEWithLogitsLoss()
+         else:
+             self.classifier = nn.Linear(1, n_class)
+             self.criterion = nn.CrossEntropyLoss()
+
+     def _infer_classical_output_dim(self, input_shape):
+         # Use explicit input shape if provided
+         if input_shape is not None:
+             dummy_input = torch.zeros((1,) + input_shape)
+         else:
+             # Attempt to infer the input shape dynamically
+             dummy_input = self._infer_classical_input_shape()
+             if dummy_input is None:
+                 raise ValueError("Unable to infer the input shape of the classical model.")
+
+         with torch.no_grad():
+             classical_output = self.classical_model(dummy_input)
+             return classical_output.numel()
+
+     def _infer_classical_input_shape(self):
+         # This method remains for automatic inference, but we now allow for manual override.
+         # Here, you can attempt more advanced inference if desired.
+         try:
+             # Infer input shape for feedforward networks
+             return (1, self.classical_model.input_shape[0])
+         except AttributeError:
+             return None
+
+     def _build_quantum_circuit(self):
+         ansatz = self.circuit_factory.GetAnsatzCircuit(self.ansatz)
+         embedding = self.circuit_factory.GetEmbeddingCircuit(self.embedding)
+
+         @qml.qnode(self.deviceQ, interface='torch', diff_method='adjoint')
+         def quantum_circuit(x, theta):
+             embedding.getCircuit()(x, wires=range(self.nqubits))
+             ansatz.getCircuit()(theta, wires=range(self.nqubits))
+
+             return qml.expval(qml.PauliZ(0))
+
+         self.quantum_circuit = quantum_circuit
+
+         # Initialize quantum parameters
+         self.quantum_params = nn.Parameter(torch.randn(self.layers * ansatz.getParameters()))
+
+     def forward(self, x):
+         # Extract features from classical model
+         classical_features = self.classical_model(x)
+
+         # Handle LSTM outputs if they are 3D tensors
+         if classical_features.dim() == 3:
+             # Use the last time step
+             classical_features = classical_features[:, -1, :]
+             # Or flatten the entire sequence
+             # classical_features = classical_features.reshape(x.size(0), -1)
+
+         # Ensure the tensor is 2D
+         classical_features = classical_features.view(x.size(0), -1)
+
+         # Bridge layer transformation
+         bridged_features = self.bridge(classical_features)
+
+         # Quantum circuit processing
+         qnn_output = torch.stack([
+             self.quantum_circuit(feat, self.quantum_params)
+             for feat in bridged_features
+         ]).unsqueeze(1)
+
+         return self.classifier(qnn_output).squeeze()
+
+     def predict(self, X):
+         with torch.no_grad():
+             if not isinstance(X, torch.Tensor):
+                 X = torch.tensor(X, dtype=torch.float32)
+             outputs = self.forward(X)
+             return (torch.sigmoid(outputs) > 0.5).float()
+
+     def fit(self, X, y, batch_size=32, epochs=10, lr=None):
+         learning_rate = lr if lr is not None else self.lr
+         X_train = torch.tensor(X, dtype=torch.float32)
+         y_train = torch.tensor(y, dtype=torch.long if self.n_class > 2 else torch.float32)
+
+         data_loader = torch.utils.data.DataLoader(
+             list(zip(X_train, y_train)), batch_size=batch_size, shuffle=True
+         )
+
+         params_to_optimize = []
+         if self.fine_tune_classical:
+             params_to_optimize.extend(self.classical_model.parameters())
+         params_to_optimize.extend(list(self.bridge.parameters()))
+         params_to_optimize.append(self.quantum_params)
+         params_to_optimize.extend(list(self.classifier.parameters()))
+
+         optimizer = optim.Adam(params_to_optimize, lr=learning_rate)
+         start_time = time()
+         for epoch in range(epochs):
+             epoch_loss = 0.0
+             for batch_X, batch_y in data_loader:
+                 optimizer.zero_grad()
+                 predictions = self.forward(batch_X)
+                 loss = self.criterion(predictions, batch_y)
+                 loss.backward()
+                 optimizer.step()
+                 epoch_loss += loss.item()
+             printer.print(f"\t\tEpoch {epoch+1}/{epochs}, Loss: {epoch_loss/len(data_loader):.4f}")
+         printer.print(f"\t\tTraining completed in {time() - start_time:.2f} seconds")
+
+
+ """
+ Example of use:
+ """
+
+ class SimpleNN(nn.Module):
+     def __init__(self, input_shape, output_classes):
+         super().__init__()
+         self.input_hape = input_shape
+         self.output_classes = output_classes
+         self.layers = nn.Sequential(
+             nn.Linear(input_shape[0], 512),
+             nn.ReLU(),
+             nn.Linear(512, 512),
+             nn.ReLU(),
+             nn.Linear(512, 512),
+             nn.ReLU(),
+             nn.Linear(512, 256),
+             nn.ReLU(),
+             nn.Linear(256, 32),
+             nn.ReLU()
+         )
+
+         # Final output layer (number of classes)
+         self.output_layer = nn.Linear(32, self.output_classes)
+
+     def forward(self, x):
+         # Ensure x is flattened into [batch_size, features]
+         x = x.view(x.size(0), -1)
+         x = self.layers(x)
+
+         # Output layer to produce the class logits (for softmax or other loss functions)
+         x = self.output_layer(x)
+         return x
+
+ class CNNModel(nn.Module):
+     def __init__(self, input_channels, num_classes):
+         super(CNNModel, self).__init__()
+         self.conv = nn.Sequential(
+             nn.Conv2d(input_channels, 16, kernel_size=3, stride=1, padding=1), # Ensure padding
+             nn.ReLU(),
+             nn.MaxPool2d(kernel_size=2, stride=2), # Pooling reduces spatial dimensions
+         )
+         self.fc = nn.Sequential(
+             nn.Flatten(),
+             nn.Linear(16 * 1 * 1, 128), # Match flattened size
+             nn.ReLU(),
+             nn.Linear(128, num_classes),
+         )
+
+     def forward(self, x):
+         x = self.conv(x)
+         x = self.fc(x)
+         return x
+
+ class LSTMModel(nn.Module):
+     def __init__(self, input_dim, hidden_dim, num_classes, num_layers=1):
+         super().__init__()
+         self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
+         self.fc = nn.Linear(hidden_dim, num_classes)
+
+     def forward(self, x):
+         _, (hidden, _) = self.lstm(x) # Use the last hidden state
+         hidden = hidden[-1] # Take the final layer's hidden state
+         return self.fc(hidden)
+
+
+ def main():
+     from sklearn.datasets import load_iris, load_breast_cancer
+     from sklearn.preprocessing import StandardScaler, LabelEncoder
+     from sklearn.model_selection import train_test_split
+     import torch
+     from torch import nn
+
+     printer.set_verbose(True)
+
+     iris = load_iris()
+     X = iris.data
+     y = iris.target
+     y = LabelEncoder().fit_transform(y)
+     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42)
+     scaler = StandardScaler()
+     X_train_scaled = scaler.fit_transform(X_train)
+     X_test_scaled = scaler.transform(X_test)
+
+     classical_models = {
+         "SimpleNN": (SimpleNN((X_train_scaled.shape[1],), output_classes=len(set(y))), (X_train_scaled.shape[1],)),
+         "CNNModel": (CNNModel(input_channels=1, num_classes=len(set(y))), (1, 2, 2)), # Simulated image shape
+         "LSTMModel": (LSTMModel(input_dim=X_train_scaled.shape[1], hidden_dim=64, num_classes=len(set(y))), (X_train_scaled.shape[1],)),
+     }
+
+     for model_name, (classical_model, input_shape) in classical_models.items():
+         print(f"\nTesting Hybrid Model with {model_name}...")
+
+         if model_name == "CNNModel":
+             # Reshape input into [batch_size, channels, height, width]
+             # Assuming features can be reshaped into a 2x2 grid (Iris dataset has 4 features)
+             X_train_processed = X_train_scaled.reshape(-1, 1, 2, 2) # 1 channel, 2x2 grid
+             X_test_processed = X_test_scaled.reshape(-1, 1, 2, 2)
+         if model_name == "LSTMModel":
+             # Reshape input into [batch_size, seq_len, features]
+             X_train_processed = X_train_scaled.reshape(X_train_scaled.shape[0], 1, X_train_scaled.shape[1]) # 1 time step
+             X_test_processed = X_test_scaled.reshape(X_test_scaled.shape[0], 1, X_test_scaled.shape[1])
+         else:
+             X_train_processed = X_train_scaled
+             X_test_processed = X_test_scaled
+
+         hybrid_model = HybridQuantumClassicalModel(
+             classical_model,
+             nqubits=4,
+             backend=Backend.lightningQubit,
+             ansatz=Ansatzs.TREE_TENSOR,
+             embedding=Embedding.RX,
+             n_class=len(set(y)),
+             layers=5,
+             fine_tune_classical=True,
+             lr=0.01,
+             input_shape=input_shape
+         )
+
+         hybrid_model.fit(X_train_processed, y_train, epochs=10)
+         predictions = hybrid_model.predict(X_test_processed)
+
+         predictions = torch.tensor(predictions)
+         if predictions.dim() == 1:
+             predicted_classes = predictions
+         elif predictions.dim() == 2:
+             predicted_classes = torch.argmax(predictions, dim=1)
+         else:
+             raise ValueError(f"Unexpected predictions shape: {predictions.shape}")
+
+         y_test_tensor = torch.tensor(y_test)
+         accuracy = (predicted_classes == y_test_tensor).float().mean()
+         print(f"Hybrid Model with {model_name} Accuracy: {accuracy.item():.4f}")
+         print(f"Predictions: {predicted_classes.tolist()}")
+
+ if __name__ == "__main__":
+     main()
lazyqml/Factories/Models/QNNBag.py
@@ -2,12 +2,12 @@ import torch
  import pennylane as qml
  from time import time
  import numpy as np
- from lazyqml.Interfaces.iModel import Model
- from lazyqml.Interfaces.iAnsatz import Ansatz
- from lazyqml.Interfaces.iCircuit import Circuit
- from lazyqml.Factories.Circuits.fCircuits import *
- from lazyqml.Global.globalEnums import Backend
- from lazyqml.Utils.Utils import printer
+ from Interfaces.iModel import Model
+ from Interfaces.iAnsatz import Ansatz
+ from Interfaces.iCircuit import Circuit
+ from Factories.Circuits.fCircuits import *
+ from Global.globalEnums import Backend
+ from Utils.Utils import printer
  import warnings

  class QNNBag(Model):
lazyqml/Factories/Models/QNNTorch.py
@@ -2,13 +2,14 @@ import torch
  import pennylane as qml
  from time import time
  import numpy as np
- from lazyqml.Interfaces.iModel import Model
- from lazyqml.Interfaces.iAnsatz import Ansatz
- from lazyqml.Interfaces.iCircuit import Circuit
- from lazyqml.Factories.Circuits.fCircuits import *
- from lazyqml.Global.globalEnums import Backend
- from lazyqml.Utils.Utils import printer
+ from Interfaces.iModel import Model
+ from Interfaces.iAnsatz import Ansatz
+ from Interfaces.iCircuit import Circuit
+ from Factories.Circuits.fCircuits import *
+ from Global.globalEnums import Backend
+ from Utils.Utils import printer
  import warnings
+
  class QNNTorch(Model):
      def __init__(self, nqubits, backend, ansatz, embedding, n_class, layers, epochs, shots, lr, batch_size, seed=1234) -> None:
          super().__init__()
@@ -30,7 +31,6 @@ class QNNTorch(Model):
          self.params = None
          self._build_circuit()

-
          # Suppress all warnings
          warnings.filterwarnings("ignore")

@@ -79,7 +79,7 @@ class QNNTorch(Model):
      def fit(self, X, y):
          # Move the model to the appropriate device (GPU or CPU)
          self.device = torch.device("cuda:0" if torch.cuda.is_available() and self.backend == Backend.lightningGPU else "cpu")
-
+         # print(f"USING: {self.device} and {self.deviceQ}")

          # Convert training data to torch tensors and transfer to device
          X_train = torch.tensor(X, dtype=torch.float32).to(self.device)
lazyqml/Factories/Models/QSVM.py
@@ -1,4 +1,4 @@
- from lazyqml.Interfaces.iModel import Model
+ from Interfaces.iModel import Model
  import numpy as np
  from sklearn.svm import SVC
  from sklearn.preprocessing import StandardScaler
@@ -6,8 +6,8 @@ from sklearn.model_selection import train_test_split
  from sklearn.metrics import accuracy_score, balanced_accuracy_score
  import pennylane as qml
  from time import time
- from lazyqml.Factories.Circuits.fCircuits import CircuitFactory
- from lazyqml.Utils.Utils import printer
+ from Factories.Circuits.fCircuits import CircuitFactory
+ from Utils.Utils import printer

  class QSVM(Model):
      def __init__(self, nqubits, embedding, backend, shots, seed=1234):
lazyqml/Factories/Models/_QNNPennylane.py
@@ -1,7 +1,7 @@
- from lazyqml.Interfaces.iModel import Model
- from lazyqml.Interfaces.iAnsatz import Ansatz
- from lazyqml.Interfaces.iCircuit import Circuit
- from lazyqml.Factories.Circuits.fCircuits import *
+ from Interfaces.iModel import Model
+ from Interfaces.iAnsatz import Ansatz
+ from Interfaces.iCircuit import Circuit
+ from Factories.Circuits.fCircuits import *

  import time
  import pennylane as qml
lazyqml/Factories/Models/fModels.py
@@ -1,7 +1,7 @@
- from lazyqml.Factories.Models.QSVM import QSVM
- from lazyqml.Factories.Models.QNNBag import QNNBag
- from lazyqml.Factories.Models.QNNTorch import QNNTorch
- from lazyqml.Global.globalEnums import *
+ from Factories.Models.QSVM import *
+ from Factories.Models.QNNBag import *
+ from Factories.Models.QNNTorch import *
+ from Global.globalEnums import *

  class ModelFactory:
      def __init__(self) -> None:
lazyqml/Factories/Preprocessing/Pca.py
@@ -1,6 +1,6 @@
- from lazyqml.Interfaces.iPreprocessing import Preprocessing
+ # Importing from
+ from Interfaces.iPreprocessing import Preprocessing
  from sklearn.decomposition import PCA
- from sklearn.exceptions import NotFittedError

  class Pca(Preprocessing):
      def __init__(self, nqubits, ncomponents):
lazyqml/Factories/Preprocessing/Sanitizer.py
@@ -1,8 +1,8 @@
- from lazyqml.Interfaces.iPreprocessing import Preprocessing
+ # Importing from
+ from Interfaces.iPreprocessing import Preprocessing
  from sklearn.compose import ColumnTransformer
  from sklearn.pipeline import Pipeline
  from sklearn.preprocessing import StandardScaler
-
  from sklearn.compose import make_column_selector as selector

  class Sanitizer(Preprocessing):
lazyqml/Factories/Preprocessing/fPreprocessing.py
@@ -1,13 +1,7 @@
- # Importing Enums
- from lazyqml.Global.globalEnums import *
-
- # Importing Preprocessings
- from lazyqml.Factories.Preprocessing.Pca import *
- # from Factories.Preprocessing.PcaAmp import *
- # from Factories.Preprocessing.PcaTree import *
- # from Factories.Preprocessing.PcaTreeAmp import *
- from lazyqml.Factories.Preprocessing.Sanitizer import *
-
+ # Importing from
+ from Global.globalEnums import *
+ from Factories.Preprocessing.Pca import *
+ from Factories.Preprocessing.Sanitizer import *

  class PreprocessingFactory:
      def __init__(self, nqubits) -> None:
@@ -24,17 +18,4 @@ class PreprocessingFactory:
          elif ansatz == Ansatzs.TREE_TENSOR:
              return Pca(self.nqubits, 2**(self.nqubits.bit_length()-1))
          else:
-             return Pca(self.nqubits, self.nqubits)
-
-         """
-         Deprecated
-
-         # if prep == Preprocessing.PCA:
-         #     return Pca(self.nqubits)
-         # elif prep == Preprocessing.PCA_AMP:
-         #     return PcaAmp(self.nqubits)
-         # elif prep == Preprocessing.PCA_TREE:
-         #     return PcaTree(self.nqubits)
-         # elif prep == Preprocessing.PCA_TREE_AMP:
-         #     return PcaTreeAmp(self.nqubits)
-         """
+             return Pca(self.nqubits, self.nqubits)
lazyqml/Global/globalEnums.py
@@ -1,5 +1,7 @@
  """
- This file is devoted to define the global enums for easening the interface.
+ ------------------------------------------------------------------------------------------------------------------
+ This file is devoted to define the global enums for easening the interface.
+ ------------------------------------------------------------------------------------------------------------------
  """

  from enum import Enum
lazyqml/Interfaces/iAnsatz.py
@@ -1,5 +1,5 @@
  from abc import abstractmethod
- from lazyqml.Interfaces.iCircuit import Circuit
+ from Interfaces.iCircuit import Circuit

  class Ansatz(Circuit):
      @abstractmethod