lazyqml 2.0.5__py2.py3-none-any.whl → 3.0.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. lazyqml/Factories/Circuits/AmplitudeEmbedding.py +1 -1
  2. lazyqml/Factories/Circuits/HCzRx.py +1 -1
  3. lazyqml/Factories/Circuits/HardwareEfficient.py +1 -1
  4. lazyqml/Factories/Circuits/RxEmbedding.py +1 -1
  5. lazyqml/Factories/Circuits/RyEmbedding.py +1 -1
  6. lazyqml/Factories/Circuits/RzEmbedding.py +1 -1
  7. lazyqml/Factories/Circuits/TreeTensor.py +1 -1
  8. lazyqml/Factories/Circuits/TwoLocal.py +1 -1
  9. lazyqml/Factories/Circuits/ZzEmbedding.py +1 -1
  10. lazyqml/Factories/Circuits/fCircuits.py +10 -10
  11. lazyqml/Factories/Dispatchers/Dispatcher.py +264 -85
  12. lazyqml/Factories/Models/Hybrid.py +460 -0
  13. lazyqml/Factories/Models/QNNBag.py +6 -6
  14. lazyqml/Factories/Models/QNNTorch.py +8 -8
  15. lazyqml/Factories/Models/QSVM.py +3 -3
  16. lazyqml/Factories/Models/_QNNPennylane.py +4 -4
  17. lazyqml/Factories/Models/fModels.py +4 -4
  18. lazyqml/Factories/Preprocessing/Pca.py +2 -2
  19. lazyqml/Factories/Preprocessing/Sanitizer.py +2 -2
  20. lazyqml/Factories/Preprocessing/fPreprocessing.py +5 -24
  21. lazyqml/Global/globalEnums.py +3 -1
  22. lazyqml/Interfaces/iAnsatz.py +1 -1
  23. lazyqml/Utils/Utils.py +203 -84
  24. lazyqml/Utils/Validator.py +4 -7
  25. lazyqml/__init__.py +1 -1
  26. lazyqml/lazyqml.py +50 -48
  27. lazyqml-3.0.1.dist-info/LICENSE +21 -0
  28. {lazyqml-2.0.5.dist-info → lazyqml-3.0.1.dist-info}/METADATA +48 -35
  29. lazyqml-3.0.1.dist-info/RECORD +40 -0
  30. {lazyqml-2.0.5.dist-info → lazyqml-3.0.1.dist-info}/WHEEL +1 -1
  31. lazyqml/.lazyqmlP.py +0 -293
  32. lazyqml/.lazyqmlVote.py +0 -303
  33. lazyqml/Factories/Circuits/_Qkernel.py +0 -16
  34. lazyqml/Factories/Circuits/_Qnn.py +0 -17
  35. lazyqml/Factories/Dispatchers/DispatcherCV.py +0 -143
  36. lazyqml/Factories/Dispatchers/DispatcherNumba.py +0 -226
  37. lazyqml/Factories/Dispatchers/_Dispatcher.py +0 -188
  38. lazyqml/Factories/Dispatchers/_DispatcherMultiprocessing.py +0 -201
  39. lazyqml/Factories/Dispatchers/_QNNBagdispatcher.py +0 -2
  40. lazyqml/Factories/Dispatchers/_QNNdispatcher.py +0 -2
  41. lazyqml/Factories/Dispatchers/_QSVMdispatcher.py +0 -112
  42. lazyqml/Factories/Dispatchers/__Dispatcher.py +0 -193
  43. lazyqml/Factories/Preprocessing/_PcaAmp.py +0 -22
  44. lazyqml/Factories/Preprocessing/_PcaTree.py +0 -22
  45. lazyqml/Factories/Preprocessing/_PcaTreeAmp.py +0 -22
  46. lazyqml/Lanza copy.sh +0 -32
  47. lazyqml/Lanza.sh +0 -21
  48. lazyqml/mem.py +0 -85
  49. lazyqml-2.0.5.dist-info/RECORD +0 -56
  50. {lazyqml-2.0.5.dist-info → lazyqml-3.0.1.dist-info}/AUTHORS.rst +0 -0
  51. /lazyqml-2.0.5.dist-info/LICENSE → /lazyqml-3.0.1.dist-info/LICENSE copy +0 -0
  52. {lazyqml-2.0.5.dist-info → lazyqml-3.0.1.dist-info}/entry_points.txt +0 -0
  53. {lazyqml-2.0.5.dist-info → lazyqml-3.0.1.dist-info}/top_level.txt +0 -0
lazyqml/Utils/Utils.py CHANGED
@@ -1,9 +1,23 @@
- from lazyqml.Global.globalEnums import *
- from itertools import product
+ # Imports
+ import pandas as pd
  import numpy as np
  import torch
  import psutil
+ import GPUtil
+
+ # Importing from
+ from sklearn.model_selection import LeaveOneOut, StratifiedKFold, train_test_split
+ from Global.globalEnums import *
+ from itertools import product
 
+ """
+ ------------------------------------------------------------------------------------------------------------------
+ Verbose printer class
+ - This class implements the functionlity to print or not depending on a boolean flag
+ - The message is preceded by "[VERBOSE] {message}"
+ - It is implemented as a Singleton Object
+ ------------------------------------------------------------------------------------------------------------------
+ """
  class VerbosePrinter:
  _instance = None
  _initialized = False
@@ -32,8 +46,17 @@ class VerbosePrinter:
  if cls._instance is None:
  cls._instance = VerbosePrinter()
  return cls._instance
+
+ """
+ ------------------------------------------------------------------------------------------------------------------
+ Miscelaneous Utils
+ ------------------------------------------------------------------------------------------------------------------
+ """
 
  def adjustQubits(nqubits, numClasses):
+ """
+ Adjust the number of qubits to be able to solve the problem
+ """
  # Find the next power of 2 greater than numClasses
  power = np.ceil(np.log2(numClasses))
  nqubits = 2 ** power
@@ -42,120 +65,216 @@ def adjustQubits(nqubits, numClasses):
  nqubits *= 2
  return int(nqubits)
 
+ def calculate_quantum_memory(num_qubits, overhead=2):
+ """
+ Estimates the memory in MiB used by the quantum circuits.
+ """
+ # Each qubit state requires 2 complex numbers (amplitude and phase)
+ # Each complex number uses 2 double-precision floats (16 bytes)
+ bytes_per_qubit_state = 16
 
- def create_combinationsCV(classifiers, embeddings, ansatzs, features, qubits, FoldID, RepeatID):
- classifier_list = []
- embedding_list = []
- ansatzs_list = []
-
- # Make sure we don't have duplicated items
- classifiers = list(classifiers)
- embeddings = list(embeddings)
- ansatzs = list(ansatzs)
- qubits = sorted(list(qubits)) # Convert the qubits set to a list as well
- FoldID = sorted(list(FoldID))
- RepeatID = sorted(list(RepeatID))
-
- if Model.ALL in classifiers:
- classifier_list = Model.list()
- classifier_list.remove(Model.ALL)
- else:
- classifier_list = classifiers
-
- if Embedding.ALL in embeddings:
- embedding_list = Embedding.list()
- embedding_list.remove(Embedding.ALL)
- else:
- embedding_list = embeddings
-
- if Ansatzs.ALL in ansatzs:
- ansatzs_list = Ansatzs.list()
- ansatzs_list.remove(Ansatzs.ALL)
- else:
- ansatzs_list = ansatzs
+ # Number of possible states is 2^n, where n is the number of qubits
+ num_states = 2 ** num_qubits
 
- combinations = []
+ # Total memory in bytes
+ total_memory_bytes = num_states * bytes_per_qubit_state * overhead
 
- for classifier in classifier_list:
- if classifier == Model.QSVM:
- # QSVM doesn't use ansatzs or features but uses qubits (first in the product)
- combinations.extend(list(product(qubits, [classifier], embedding_list, [None], [None], RepeatID, FoldID)))
- elif classifier == Model.QNN:
- # QNN uses ansatzs and qubits (qubits first)
- combinations.extend(list(product(qubits, [classifier], embedding_list, ansatzs_list, [None], RepeatID, FoldID)))
- elif classifier == Model.QNN_BAG:
- # QNN_BAG uses ansatzs, features, and qubits (qubits first)
- combinations.extend(list(product(qubits, [classifier], embedding_list, ansatzs_list, features, RepeatID, FoldID)))
+ # Convert to more readable units
 
- return combinations
+ return total_memory_bytes / (1024**2)
 
- def create_combinations(classifiers, embeddings, ansatzs, features, qubits):
+ def calculate_free_memory():
+ """
+ Calculates the amount of free RAM
+ """
+ # Use psutil to get available system memory (in MiB)
+ mem = psutil.virtual_memory()
+ free_ram_mb = mem.available / (1024 ** 2) # Convert bytes to MiB
+ return free_ram_mb
+
+ def calculate_free_video_memory():
+ """
+ Calculates the amount of free Video Memory
+ """
+ # Use psutil to get available system memory (in MiB)
+ return GPUtil.getGPUs()[0].memoryFree
+
+
+ def create_combinations(classifiers, embeddings, ansatzs, features, qubits, FoldID, RepeatID):
  classifier_list = []
  embedding_list = []
  ansatzs_list = []
-
+
  # Make sure we don't have duplicated items
  classifiers = list(classifiers)
  embeddings = list(embeddings)
  ansatzs = list(ansatzs)
- qubits = sorted(list(qubits)) # Convert the qubits set to a list as well
-
+ qubit_values = sorted(list(qubits))
+ FoldID = sorted(list(FoldID))
+ RepeatID = sorted(list(RepeatID))
+
  if Model.ALL in classifiers:
  classifier_list = Model.list()
  classifier_list.remove(Model.ALL)
  else:
  classifier_list = classifiers
-
+
  if Embedding.ALL in embeddings:
  embedding_list = Embedding.list()
  embedding_list.remove(Embedding.ALL)
  else:
  embedding_list = embeddings
-
+
  if Ansatzs.ALL in ansatzs:
  ansatzs_list = Ansatzs.list()
  ansatzs_list.remove(Ansatzs.ALL)
  else:
  ansatzs_list = ansatzs
-
+
  combinations = []
-
- for classifier in classifier_list:
- if classifier == Model.QSVM:
- # QSVM doesn't use ansatzs or features but uses qubits (first in the product)
- combinations.extend(list(product(qubits, [classifier], embedding_list, [None], [None])))
- elif classifier == Model.QNN:
- # QNN uses ansatzs and qubits (qubits first)
- combinations.extend(list(product(qubits, [classifier], embedding_list, ansatzs_list, [None])))
- elif classifier == Model.QNN_BAG:
- # QNN_BAG uses ansatzs, features, and qubits (qubits first)
- combinations.extend(list(product(qubits, [classifier], embedding_list, ansatzs_list, features)))
-
+ # Create all base combinations first
+ for qubits in qubit_values:
+ for classifier in classifier_list:
+ temp_combinations = []
+ if classifier == Model.QSVM:
+ # QSVM doesn't use ansatzs or features but uses qubits
+ temp_combinations = list(product([qubits], [classifier], embedding_list, [None], [None], RepeatID, FoldID))
+ elif classifier == Model.QNN:
+ # QNN uses ansatzs and qubits
+ temp_combinations = list(product([qubits], [classifier], embedding_list, ansatzs_list, [None], RepeatID, FoldID))
+ elif classifier == Model.QNN_BAG:
+ # QNN_BAG uses ansatzs, features, and qubits
+ temp_combinations = list(product([qubits], [classifier], embedding_list, ansatzs_list, features, RepeatID, FoldID))
+
+ # Add memory calculation for each combination
+ for combo in temp_combinations:
+ memory = calculate_quantum_memory(combo[0]) # Calculate memory based on number of qubits
+ combinations.append(combo + (memory,))
+
  return combinations
 
  def fixSeed(seed):
  np.random.seed(seed=seed)
  torch.manual_seed(seed)
 
- def calculate_quantum_memory(num_qubits, overhead=2):
- # Each qubit state requires 2 complex numbers (amplitude and phase)
- # Each complex number uses 2 double-precision floats (16 bytes)
- bytes_per_qubit_state = 16
 
- # Number of possible states is 2^n, where n is the number of qubits
- num_states = 2 ** num_qubits
-
- # Total memory in bytes
- total_memory_bytes = num_states * bytes_per_qubit_state * overhead
-
- # Convert to more readable units
-
- return total_memory_bytes / (1024**3)
-
- def calculate_free_memory():
- # Use psutil to get available system memory (in GB)
- mem = psutil.virtual_memory()
- free_ram_gb = mem.available / (1024 ** 3) # Convert bytes to GB
- return free_ram_gb
-
- printer = VerbosePrinter()
+ def generate_cv_indices(X, y, mode="cross-validation", test_size=0.4, n_splits=5, n_repeats=1, random_state=None):
+ """
+ Generate train and test indices for either cross-validation, holdout split, or leave-one-out.
+
+ Parameters:
+ X (pd.DataFrame or np.ndarray): The features matrix.
+ y (pd.Series or np.ndarray): The target vector.
+ mode (str): "cross-validation", "holdout", or "leave-one-out".
+ test_size (float): Test set proportion for the holdout split (ignored for CV and LOO).
+ n_splits (int): Number of folds in StratifiedKFold (ignored for holdout and LOO).
+ n_repeats (int): Number of repeats for cross-validation (ignored for holdout and LOO).
+ random_state (int): Random state for reproducibility.
+
+ Returns:
+ dict: A dictionary of train/test indices.
+ """
+ cv_indices = {}
+
+ if mode == "holdout":
+ # Single train-test split for holdout
+ train_idx, test_idx = train_test_split(
+ np.arange(len(X)),
+ test_size=test_size,
+ stratify=y,
+ random_state=random_state
+ )
+ cv_indices[(0, 0)] = {
+ 'train_idx': train_idx,
+ 'test_idx': test_idx
+ }
+
+ elif mode == "cross-validation":
+ # StratifiedKFold for cross-validation splits
+ for repeat in range(n_repeats):
+ skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state + repeat if random_state is not None else None)
+ for fold, (train_idx, test_idx) in enumerate(skf.split(X, y)):
+ cv_indices[(repeat, fold)] = {
+ 'train_idx': train_idx,
+ 'test_idx': test_idx
+ }
+
+ elif mode == "leave-one-out":
+ # LeaveOneOut cross-validation
+ loo = LeaveOneOut()
+ for fold, (train_idx, test_idx) in enumerate(loo.split(X)):
+ cv_indices[(0, fold)] = {
+ 'train_idx': train_idx,
+ 'test_idx': test_idx
+ }
+
+ else:
+ raise ValueError("Invalid mode. Choose 'holdout', 'cross-validation', or 'leave-one-out'.")
+
+ return cv_indices
+
+ def get_train_test_split(cv_indices, repeat_id=0, fold_id=0):
+ """
+ Retrieve the train and test indices for a given repeat and fold ID.
+
+ Parameters:
+ cv_indices (dict): The cross-validation indices dictionary.
+ repeat_id (int): The repeat ID (0 to n_repeats-1 or 0 for holdout/LOO).
+ fold_id (int): The fold ID within the specified repeat.
+
+ Returns:
+ tuple: (train_idx, test_idx) arrays for the specified fold and repeat.
+ """
+ indices = cv_indices.get((repeat_id, fold_id))
+ if indices is None:
+ print(f"RepeatID {repeat_id}, FoldID{fold_id}")
+ raise ValueError("Invalid repeat_id or fold_id specified.")
+
+ return indices['train_idx'], indices['test_idx']
+
+
+
+ def dataProcessing(X, y, prepFactory, customImputerCat, customImputerNum,
+ train_idx, test_idx, ansatz=None, embedding=None):
+ """
+ Process data for specific train/test indices.
+
+ Parameters:
+ - X: Input features
+ - y: Target variable
+ - prepFactory: Preprocessing factory object
+ - customImputerCat: Categorical imputer
+ - customImputerNum: Numerical imputer
+ - train_idx: Training set indices
+ - test_idx: Test set indices
+ - ansatz: Optional preprocessing ansatz
+ - embedding: Optional embedding method
+
+ Returns:
+ Tuple of (X_train_processed, X_test_processed, y_train, y_test)
+ """
+ # Split the data using provided indices
+ X_train, X_test = X.iloc[train_idx], X.iloc[test_idx]
+ y_train, y_test = y[train_idx], y[test_idx]
+
+ # Create sanitizer and preprocess
+ sanitizer = prepFactory.GetSanitizer(customImputerCat, customImputerNum)
+ X_train = pd.DataFrame(sanitizer.fit_transform(X_train))
+ X_test = pd.DataFrame(sanitizer.transform(X_test))
+
+ # Apply additional preprocessing if ansatz/embedding provided
+ if ansatz is not None or embedding is not None:
+ preprocessing = prepFactory.GetPreprocessing(ansatz=ansatz, embedding=embedding)
+ X_train_processed = np.array(preprocessing.fit_transform(X_train, y=y_train))
+ X_test_processed = np.array(preprocessing.transform(X_test))
+ else:
+ X_train_processed = np.array(X_train)
+ X_test_processed = np.array(X_test)
+
+ # Convert target variables to numpy arrays
+ y_train = np.array(y_train)
+ y_test = np.array(y_test)
+
+ return X_train_processed, X_test_processed, y_train, y_test
+
+ printer = VerbosePrinter()
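
For orientation, the sketch below exercises the helpers this release adds to Utils.py: calculate_quantum_memory (a statevector-size estimate in MiB), calculate_free_memory, and the precomputed fold indices from generate_cv_indices/get_train_test_split. It is a minimal, hypothetical usage example rather than code from the package; the import path is an assumption (the module itself now imports Global.globalEnums without the lazyqml prefix, so the exact path may vary), and the dataset and parameter values are illustrative.

# Hypothetical usage sketch (not part of the diff); import path assumed.
import pandas as pd
from sklearn.datasets import load_iris
from lazyqml.Utils.Utils import (
    calculate_quantum_memory, calculate_free_memory,
    generate_cv_indices, get_train_test_split,
)

# Memory gating: 22 qubits (the new default threshold) -> 2**22 states * 16 bytes * overhead 2 = 128 MiB.
estimate_mib = calculate_quantum_memory(22)          # 128.0
fits_in_ram = estimate_mib < calculate_free_memory()

# Split indices are precomputed once and keyed by (repeat, fold).
data = load_iris()
X, y = pd.DataFrame(data.data), data.target
cv = generate_cv_indices(X, y, mode="cross-validation", n_splits=5, n_repeats=2, random_state=42)
train_idx, test_idx = get_train_test_split(cv, repeat_id=1, fold_id=3)
print(len(train_idx), len(test_idx))                 # 120 30 on the 150-sample iris data

Note that create_combinations now appends this per-qubit-count MiB estimate as the last element of every combination tuple, so downstream consumers can see the memory cost of each job.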
lazyqml/Utils/Validator.py CHANGED
@@ -1,12 +1,9 @@
- from pydantic import BaseModel, ValidationError, field_validator , ConfigDict
- from typing import Callable
- import inspect
- from sklearn.metrics import accuracy_score
- from sklearn.preprocessing import OneHotEncoder, StandardScaler
  import numpy as np
- from typing import Any
  import pandas as pd
 
+ from pydantic import BaseModel, ConfigDict, field_validator
+ from typing import Any
+
  class FitParamsValidator(BaseModel):
  model_config = ConfigDict(arbitrary_types_allowed=True) # Allow arbitrary types like DataFrame and ndarray
 
@@ -77,7 +74,7 @@ class FitParamsValidatorCV(BaseModel):
 
  if size1 != size2:
  raise ValueError(f"{name1} and {name2} must have the same number of examples. "
- f"Got {size1} and {size2}.")
+ f"Got {size1} and {size2}.")
 
  # Ensure inputs are valid DataFrames or NumPy arrays, and not empty/null
  @field_validator("x", "y")
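
The slimmed-down validator module is what the new entry points in lazyqml.py call before dispatching (FitParamsValidatorCV(x=X, y=y)). A minimal sketch of that pre-flight check, assuming the size comparison shown above is wired into the model so that pydantic surfaces it as a ValidationError:

# Hypothetical validation sketch; the exact exception surface is assumed.
import numpy as np
import pandas as pd
from pydantic import ValidationError
from lazyqml.Utils.Validator import FitParamsValidatorCV

X = pd.DataFrame(np.random.rand(150, 4))
y = np.random.randint(0, 3, size=150)

FitParamsValidatorCV(x=X, y=y)            # same number of examples: passes silently

try:
    FitParamsValidatorCV(x=X, y=y[:100])  # mismatched sizes
except ValidationError as err:
    print(err)                            # "... must have the same number of examples. Got 150 and 100."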
lazyqml/__init__.py CHANGED
@@ -2,4 +2,4 @@
 
  __author__ = """Diego García Vega, Fernando Álvaro Plou Llorente, Alejandro Leal Castaño"""
  __email__ = "garciavdiego@uniovi.es, ploufernando@uniovi.es, lealcalejandro@uniovi.es"
- __version__ = "2.0.5"
+ __version__ = "3.0.1"
lazyqml/lazyqml.py CHANGED
@@ -1,22 +1,19 @@
+ import inspect
+ import warnings
  import numpy as np
  import pandas as pd
- import sys
  from tabulate import tabulate
  from pydantic import BaseModel, Field, model_validator, field_validator, ValidationError, conset
  from pydantic.config import ConfigDict
  from typing import List, Callable, Optional, Set
  from typing_extensions import Annotated, Set
-
-
  from lazyqml.Factories.Preprocessing.fPreprocessing import PreprocessingFactory
  from lazyqml.Global.globalEnums import *
  from lazyqml.Utils.Utils import *
  from lazyqml.Utils.Validator import *
- from lazyqml.Factories.Dispatchers.DispatcherCV import *
  from lazyqml.Factories.Dispatchers.Dispatcher import *
- from sklearn.impute import SimpleImputer
- from ucimlrepo import fetch_ucirepo
- from sklearn.preprocessing import LabelEncoder
+
+ from time import time
 
  class QuantumClassifier(BaseModel):
  """
@@ -101,13 +98,12 @@ class QuantumClassifier(BaseModel):
  shots: Annotated[int, Field(gt=0)] = 1
  runs: Annotated[int, Field(gt=0)] = 1
  batchSize: Annotated[int, Field(gt=0)] = 8
- threshold: Annotated[int, Field(gt=0)] = 26
+ threshold: Annotated[int, Field(gt=0)] = 22
  maxSamples: Annotated[float, Field(gt=0, le=1)] = 1.0
  verbose: bool = False
  customMetric: Optional[Callable] = None
  customImputerNum: Optional[Any] = None
  customImputerCat: Optional[Any] = None
- batch: Optional[bool] = True
  cores: Optional[int] = True
 
  @field_validator('nqubits', mode='before')
@@ -201,25 +197,29 @@
 
  return preprocessor
 
- def fit(self, X_train, y_train, X_test, y_test,showTable=True):
-
-
+ def fit(self, X, y, test_size=0.4, showTable=True):
+ """
+
+ """
+ warnings.filterwarnings("ignore")
  printer.set_verbose(verbose=self.verbose)
  # Validation model to ensure input parameters are DataFrames and sizes match
- FitParamsValidator(
- train_x=X_train,
- train_y=y_train,
- test_x=X_test,
- test_y=y_test
+ FitParamsValidatorCV(
+ x=X,
+ y=y
  )
  printer.print("Validation successful, fitting the model...")
 
  # Fix seed
  fixSeed(self.randomstate)
- d = Dispatcher(sequential=self.sequential,threshold=self.threshold)
- d.dispatch(nqubits=self.nqubits,randomstate=self.randomstate,predictions=self.predictions,numPredictors=self.numPredictors,numLayers=self.numLayers,classifiers=self.classifiers,ansatzs=self.ansatzs,backend=self.backend,embeddings=self.embeddings,features=self.features,learningRate=self.learningRate,epochs=self.epochs,runs=self.runs,maxSamples=self.maxSamples,verbose=self.verbose,customMetric=self.customMetric,customImputerNum=self.customImputerNum,customImputerCat=self.customImputerCat, X_train=X_train,y_train=y_train, X_test=X_test, y_test=y_test,shots=self.shots,showTable=showTable,batch=self.batchSize,auto=self.batch)
+ d = Dispatcher(sequential=self.sequential,threshold=self.threshold,repeats=1, folds=1)
+ d.dispatch(nqubits=self.nqubits,randomstate=self.randomstate,predictions=self.predictions,numPredictors=self.numPredictors,numLayers=self.numLayers,classifiers=self.classifiers,ansatzs=self.ansatzs,backend=self.backend,embeddings=self.embeddings,features=self.features,learningRate=self.learningRate,epochs=self.epochs,runs=self.runs,maxSamples=self.maxSamples,verbose=self.verbose,customMetric=self.customMetric,customImputerNum=self.customImputerNum,customImputerCat=self.customImputerCat, X=X ,y=y,shots=self.shots,showTable=showTable,batch=self.batchSize,mode="holdout",testsize=test_size)
 
  def repeated_cross_validation(self, X, y, n_splits=10, n_repeats=5, showTable=True):
+ """
+
+ """
+ warnings.filterwarnings("ignore")
  printer.set_verbose(verbose=self.verbose)
  # Validation model to ensure input parameters are DataFrames and sizes match
  FitParamsValidatorCV(
@@ -230,45 +230,47 @@ class QuantumClassifier(BaseModel):
 
  # Fix seed
  fixSeed(self.randomstate)
- d = DispatcherCV(sequential=self.sequential,threshold=self.threshold,repeats=n_repeats,folds=n_splits)
- d.dispatch(nqubits=self.nqubits,randomstate=self.randomstate,predictions=self.predictions,numPredictors=self.numPredictors,numLayers=self.numLayers,classifiers=self.classifiers,ansatzs=self.ansatzs,backend=self.backend,embeddings=self.embeddings,features=self.features,learningRate=self.learningRate,epochs=self.epochs,runs=self.runs,maxSamples=self.maxSamples,verbose=self.verbose,customMetric=self.customMetric,customImputerNum=self.customImputerNum,customImputerCat=self.customImputerCat,X_train=X ,X_test=X,y_test=y,y_train=y,shots=self.shots,showTable=showTable,batch=self.batchSize,auto=self.batch,cores=self.cores)
+ d = Dispatcher(sequential=self.sequential,threshold=self.threshold,repeats=n_repeats,folds=n_splits)
+ d.dispatch(nqubits=self.nqubits,randomstate=self.randomstate,predictions=self.predictions,numPredictors=self.numPredictors,numLayers=self.numLayers,classifiers=self.classifiers,ansatzs=self.ansatzs,backend=self.backend,embeddings=self.embeddings,features=self.features,learningRate=self.learningRate,epochs=self.epochs,runs=self.runs,maxSamples=self.maxSamples,verbose=self.verbose,customMetric=self.customMetric,customImputerNum=self.customImputerNum,customImputerCat=self.customImputerCat,X=X ,y=y,shots=self.shots,showTable=showTable,batch=self.batchSize,mode="cross-validation")
 
  def leave_one_out(self, X, y, showTable=True):
- pass
+ """
+
+ """
+ warnings.filterwarnings("ignore")
+ printer.set_verbose(verbose=self.verbose)
+ # Validation model to ensure input parameters are DataFrames and sizes match
+ FitParamsValidatorCV(
+ x=X,
+ y=y
+ )
+ printer.print("Validation successful, fitting the model...")
+
+ # Fix seed
+ fixSeed(self.randomstate)
+ d = Dispatcher(sequential=self.sequential,threshold=self.threshold,folds=len(X),repeats=1)
+ d.dispatch(nqubits=self.nqubits,randomstate=self.randomstate,predictions=self.predictions,numPredictors=self.numPredictors,numLayers=self.numLayers,classifiers=self.classifiers,ansatzs=self.ansatzs,backend=self.backend,embeddings=self.embeddings,features=self.features,learningRate=self.learningRate,epochs=self.epochs,runs=self.runs,maxSamples=self.maxSamples,verbose=self.verbose,customMetric=self.customMetric,customImputerNum=self.customImputerNum,customImputerCat=self.customImputerCat,X=X ,y=y,shots=self.shots,showTable=showTable,batch=self.batchSize,mode="leave-one-out")
 
- if __name__ == '__main__':
- Batch_auto = True
- Sequential = sys.argv[1].lower() == 'true'
- Node = sys.argv[2].lower()
- qubits = int(sys.argv[3])
- cores = int(sys.argv[4])
 
+ if __name__ == '__main__':
+ Sequential = False
+ Node = "slave4"
+ qubits = 4
+ cores = 6
 
  from sklearn.datasets import load_iris
 
- dataset="iris"
-
  # Load data
  data = load_iris()
  X = data.data
  y = data.target
 
+ repeats = 2
+ embeddings = {Embedding.ZZ}
 
- if Node == "slave1":
- repeats = 4
- embeddings = {Embedding.AMP}
- elif Node == "slave2":
- repeats = 4
- embeddings = {Embedding.ZZ}
- elif Node == "slave5":
- repeats = 2
- embeddings = {Embedding.ZZ}
-
- print(f"PARAMETERS\nEmbeddings: {embeddings}\tBatch Auto: {Batch_auto}\tSequential: {Sequential}\tNode: {Node}\tDataset: {dataset}\tQubits: {qubits}\t Folds\\Repeats: {(8,repeats)}\tCores: {cores}")
-
- classifier = QuantumClassifier(nqubits={qubits},classifiers={Model.QSVM},embeddings=embeddings,features={1.0},verbose=True,sequential=Sequential,backend=Backend.lightningQubit,batch=Batch_auto,cores=cores)
-
- start = time.time()
- classifier.repeated_cross_validation(X,y,n_repeats=repeats,n_splits=8)
- print(f"TOTAL TIME: {time.time()-start}s\t PARALLEL: {not Sequential}")
-
+ classifier = QuantumClassifier(nqubits={4})
+
+ start = time()
+
+ classifier.repeated_cross_validation(X,y,n_splits=2,n_repeats=1)
+ print(f"TOTAL TIME: {time()-start}s\t PARALLEL: {not Sequential}")
lazyqml-3.0.1.dist-info/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 QHPC & SP Research Lab
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.