lazyqml 2.0.5__py2.py3-none-any.whl → 3.0.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. lazyqml/Factories/Circuits/AmplitudeEmbedding.py +1 -1
  2. lazyqml/Factories/Circuits/HCzRx.py +1 -1
  3. lazyqml/Factories/Circuits/HardwareEfficient.py +1 -1
  4. lazyqml/Factories/Circuits/RxEmbedding.py +1 -1
  5. lazyqml/Factories/Circuits/RyEmbedding.py +1 -1
  6. lazyqml/Factories/Circuits/RzEmbedding.py +1 -1
  7. lazyqml/Factories/Circuits/TreeTensor.py +1 -1
  8. lazyqml/Factories/Circuits/TwoLocal.py +1 -1
  9. lazyqml/Factories/Circuits/ZzEmbedding.py +1 -1
  10. lazyqml/Factories/Circuits/fCircuits.py +10 -10
  11. lazyqml/Factories/Dispatchers/Dispatcher.py +264 -85
  12. lazyqml/Factories/Models/Hybrid.py +460 -0
  13. lazyqml/Factories/Models/QNNBag.py +6 -6
  14. lazyqml/Factories/Models/QNNTorch.py +8 -8
  15. lazyqml/Factories/Models/QSVM.py +3 -3
  16. lazyqml/Factories/Models/_QNNPennylane.py +4 -4
  17. lazyqml/Factories/Models/fModels.py +4 -4
  18. lazyqml/Factories/Preprocessing/Pca.py +2 -2
  19. lazyqml/Factories/Preprocessing/Sanitizer.py +2 -2
  20. lazyqml/Factories/Preprocessing/fPreprocessing.py +5 -24
  21. lazyqml/Global/globalEnums.py +3 -1
  22. lazyqml/Interfaces/iAnsatz.py +1 -1
  23. lazyqml/Utils/Utils.py +203 -84
  24. lazyqml/Utils/Validator.py +4 -7
  25. lazyqml/__init__.py +1 -1
  26. lazyqml/lazyqml.py +54 -49
  27. lazyqml-3.0.0.dist-info/LICENSE +21 -0
  28. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/METADATA +48 -35
  29. lazyqml-3.0.0.dist-info/RECORD +40 -0
  30. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/WHEEL +1 -1
  31. lazyqml/.lazyqmlP.py +0 -293
  32. lazyqml/.lazyqmlVote.py +0 -303
  33. lazyqml/Factories/Circuits/_Qkernel.py +0 -16
  34. lazyqml/Factories/Circuits/_Qnn.py +0 -17
  35. lazyqml/Factories/Dispatchers/DispatcherCV.py +0 -143
  36. lazyqml/Factories/Dispatchers/DispatcherNumba.py +0 -226
  37. lazyqml/Factories/Dispatchers/_Dispatcher.py +0 -188
  38. lazyqml/Factories/Dispatchers/_DispatcherMultiprocessing.py +0 -201
  39. lazyqml/Factories/Dispatchers/_QNNBagdispatcher.py +0 -2
  40. lazyqml/Factories/Dispatchers/_QNNdispatcher.py +0 -2
  41. lazyqml/Factories/Dispatchers/_QSVMdispatcher.py +0 -112
  42. lazyqml/Factories/Dispatchers/__Dispatcher.py +0 -193
  43. lazyqml/Factories/Preprocessing/_PcaAmp.py +0 -22
  44. lazyqml/Factories/Preprocessing/_PcaTree.py +0 -22
  45. lazyqml/Factories/Preprocessing/_PcaTreeAmp.py +0 -22
  46. lazyqml/Lanza copy.sh +0 -32
  47. lazyqml/Lanza.sh +0 -21
  48. lazyqml/mem.py +0 -85
  49. lazyqml-2.0.5.dist-info/RECORD +0 -56
  50. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/AUTHORS.rst +0 -0
  51. /lazyqml-2.0.5.dist-info/LICENSE → /lazyqml-3.0.0.dist-info/LICENSE copy +0 -0
  52. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/entry_points.txt +0 -0
  53. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/top_level.txt +0 -0
lazyqml/Factories/Dispatchers/DispatcherNumba.py
@@ -1,226 +0,0 @@
- from numba import jit, prange
- import numpy as np
- import pandas as pd
- from concurrent.futures import ThreadPoolExecutor
- import time
- from sklearn.metrics import f1_score, accuracy_score, balanced_accuracy_score
- from Factories.Models.fModels import *
- from Factories.Preprocessing.fPreprocessing import *
- from Utils.Utils import *
- import time
- import math
-
- class Dispatcher:
-     def __init__(self, sequential, threshold=27):
-         self.sequential = sequential
-         self.threshold = threshold
-
-     @staticmethod
-     @jit(nopython=True, parallel=True)
-     def _parallel_predictions(model_predict_func, X_test_values):
-         """
-         Parallel prediction using Numba
-         Note: This is a simplified version - actual implementation would need
-         to account for model specifics
-         """
-         n_samples = X_test_values.shape[0]
-         predictions = np.zeros(n_samples)
-
-         for i in prange(n_samples):
-             predictions[i] = model_predict_func(X_test_values[i])
-
-         return predictions
-
-     @staticmethod
-     @jit(nopython=True)
-     def _calculate_metrics(y_true, y_pred):
-         """
-         Calculate basic metrics using Numba
-         Note: This is a simplified version - actual implementation would need
-         to match sklearn's metric calculations
-         """
-         n_samples = len(y_true)
-         correct = 0
-         for i in range(n_samples):
-             if y_true[i] == y_pred[i]:
-                 correct += 1
-         return correct / n_samples
-
-     def _executeModel(self, model, X_train, y_train, X_test, y_test, predictions, runs, customMetric):
-         preds = []
-         accuracyR, b_accuracyR, f1R, customR = 0, 0, 0, 0
-         custom = None
-         total_exeT = 0
-
-         # Convert data to numpy arrays for Numba compatibility
-         X_train_values = X_train.values if isinstance(X_train, pd.DataFrame) else X_train
-         X_test_values = X_test.values if isinstance(X_test, pd.DataFrame) else X_test
-         y_train_values = y_train.values if isinstance(y_train, pd.Series) else y_train
-         y_test_values = y_test.values if isinstance(y_test, pd.Series) else y_test
-
-         for j in range(runs):
-             printer.print(f"\tExecuting {j+1} run of {runs}")
-             start = time.time()
-
-             # Training can't be easily Numba-optimized due to model complexity
-             model.fit(X=X_train, y=y_train)
-
-             # Try to use Numba for predictions if possible
-             try:
-                 y_pred = self._parallel_predictions(model.predict, X_test_values)
-             except:
-                 # Fallback to regular prediction if Numba fails
-                 y_pred = model.predict(X=X_test)
-
-             run_time = time.time() - start
-             total_exeT += run_time
-
-             # Try to use Numba for metric calculation
-             try:
-                 accuracy = self._calculate_metrics(y_test_values, y_pred)
-                 accuracyR += accuracy
-             except:
-                 # Fallback to sklearn metrics if Numba fails
-                 accuracyR += accuracy_score(y_test, y_pred, normalize=True)
-
-             # These metrics are more complex and may not benefit from Numba
-             b_accuracyR += balanced_accuracy_score(y_test, y_pred)
-             f1R += f1_score(y_test, y_pred, average="weighted")
-
-             if customMetric is not None:
-                 customR += customMetric(y_test, y_pred)
-
-             if predictions:
-                 preds = y_pred
-
-         accuracy = accuracyR/runs
-         b_accuracy = b_accuracyR/runs
-         f1 = f1R/runs
-         if customMetric is not None:
-             custom = customR/runs
-
-         avg_exeT = total_exeT/runs
-         return avg_exeT, accuracy, b_accuracy, f1, custom, preds
-
-     def dispatch(self, nqubits, randomstate, predictions, shots,
-                  numPredictors, numLayers, classifiers, ansatzs, backend,
-                  embeddings, features, learningRate, epochs, runs, batch,
-                  maxSamples, verbose, customMetric, customImputerNum, customImputerCat,
-                  X_train, y_train, X_test, y_test, showTable=True):
-
-         NAMES, EMBEDDINGS, ANSATZ, ACCURACY = [], [], [], []
-         B_ACCURACY, FEATURES, F1, TIME, CUSTOM = [], [], [], [], []
-
-         numClasses = len(np.unique(y_train))
-
-         if (numClasses > 2**math.floor(math.log2(nqubits))):
-             printer.print("The number of qubits must exceed the number of classes and be a power of 2.")
-             adjustedQubits = adjustQubits(nqubits=nqubits, numClasses=numClasses)
-             printer.print(f"New number of qubits:\t{adjustedQubits}")
-         else:
-             adjustedQubits = nqubits
-
-         # Convert input data to pandas DataFrames if needed
-         if not isinstance(X_train, pd.DataFrame):
-             X_train = pd.DataFrame(X_train)
-         if not isinstance(X_test, pd.DataFrame):
-             X_test = pd.DataFrame(X_test)
-
-         # Preprocessing
-         prepFactory = PreprocessingFactory(nqubits)
-         sanitizer = prepFactory.GetSanitizer(customImputerCat, customImputerNum)
-         X_train = sanitizer.fit_transform(X_train)
-         X_test = sanitizer.transform(X_test)
-
-         combinations = create_combinations(classifiers=classifiers, embeddings=embeddings,
-                                            features=features, ansatzs=ansatzs)
-
-         models_to_run = []
-         for combination in combinations:
-             name, embedding, ansatz, feature = combination
-             printer.print("="*100)
-             feature = feature if feature is not None else "~"
-             printer.print(f"Model: {name} Embedding: {embedding} Ansatz:{ansatz} Features: {feature}")
-
-             model = ModelFactory().getModel(
-                 Nqubits=adjustedQubits, model=name, Embedding=embedding,
-                 Ansatz=ansatz, N_class=numClasses, backend=backend,
-                 Shots=shots, seed=randomstate, Layers=numLayers,
-                 Max_samples=maxSamples, Max_features=feature,
-                 LearningRate=learningRate, BatchSize=batch,
-                 Epoch=epochs, numPredictors=numPredictors
-             )
-
-             preprocessing = prepFactory.GetPreprocessing(ansatz=ansatz, embedding=embedding)
-             X_train_processed = preprocessing.fit_transform(X_train, y=y_train)
-             X_test_processed = preprocessing.transform(X_test)
-
-             models_to_run.append((
-                 model, X_train_processed, y_train, X_test_processed, y_test,
-                 predictions, runs, customMetric, name, embedding, ansatz, feature
-             ))
-
-         if self.sequential or backend == "Lightning.GPU" or nqubits >= self.threshold:
-             # Sequential execution
-             for model_args in models_to_run:
-                 self._process_single_model(model_args, NAMES, EMBEDDINGS, ANSATZ, ACCURACY,
-                                            B_ACCURACY, FEATURES, F1, TIME, CUSTOM)
-         else:
-             # Parallel execution
-             with ThreadPoolExecutor(max_workers=min(len(models_to_run), psutil.cpu_count())) as executor:
-                 futures = []
-                 for model_args in models_to_run:
-                     future = executor.submit(self._process_single_model, model_args,
-                                              NAMES, EMBEDDINGS, ANSATZ, ACCURACY,
-                                              B_ACCURACY, FEATURES, F1, TIME, CUSTOM)
-                     futures.append(future)
-
-                 for future in futures:
-                     future.result()
-
-         return self._create_results_dataframe(NAMES, EMBEDDINGS, ANSATZ, FEATURES,
-                                               ACCURACY, B_ACCURACY, F1, CUSTOM, TIME,
-                                               customMetric, showTable)
-
-     def _process_single_model(self, model_args, NAMES, EMBEDDINGS, ANSATZ, ACCURACY,
-                               B_ACCURACY, FEATURES, F1, TIME, CUSTOM):
-         model, X_train_p, y_train, X_test_p, y_test, preds, runs, custom_metric, \
-             name, embedding, ansatz, feature = model_args
-
-         exeT, accuracy, b_accuracy, f1, custom, _ = self._executeModel(
-             model, X_train_p, y_train, X_test_p, y_test, preds, runs, custom_metric
-         )
-
-         NAMES.append(name)
-         EMBEDDINGS.append(embedding)
-         ANSATZ.append(ansatz)
-         ACCURACY.append(accuracy)
-         B_ACCURACY.append(b_accuracy)
-         FEATURES.append(feature)
-         F1.append(f1)
-         TIME.append(exeT)
-         CUSTOM.append(custom)
-
-     def _create_results_dataframe(self, NAMES, EMBEDDINGS, ANSATZ, FEATURES,
-                                   ACCURACY, B_ACCURACY, F1, CUSTOM, TIME,
-                                   customMetric, showTable):
-         columns = {
-             "Model": NAMES,
-             "Embedding": EMBEDDINGS,
-             "Ansatz": ANSATZ,
-             "Features": FEATURES,
-             "Accuracy": ACCURACY,
-             "Balanced Accuracy": B_ACCURACY,
-             "F1 Score": F1,
-             "Time taken": TIME,
-         }
-
-         if customMetric is not None:
-             columns[customMetric.__name__] = CUSTOM
-
-         scores = pd.DataFrame(columns)
-
-         if showTable:
-             print(scores.to_markdown())
-
-         return scores
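A note on the Numba-based dispatcher above: its docstrings already flag _parallel_predictions and _calculate_metrics as simplified, and the try/except fallbacks exist because @jit(nopython=True) only compiles code whose body Numba supports, so handing it an arbitrary Python callable such as a model's predict method fails when the jitted function is first called. Below is a minimal sketch of the pattern that does compile, using only NumPy arrays and scalars; threshold_predictions is a hypothetical stand-in, not lazyqml code.

import numpy as np
from numba import jit, prange

@jit(nopython=True, parallel=True)
def threshold_predictions(scores, cutoff):
    # Compiles in nopython mode because the loop body only touches NumPy
    # arrays and scalars; prange spreads the iterations across threads.
    n_samples = scores.shape[0]
    predictions = np.zeros(n_samples)
    for i in prange(n_samples):
        predictions[i] = 1.0 if scores[i] >= cutoff else 0.0
    return predictions

labels = threshold_predictions(np.random.rand(1000), 0.5)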
lazyqml/Factories/Dispatchers/_Dispatcher.py
@@ -1,188 +0,0 @@
- from concurrent.futures import ThreadPoolExecutor
- from Utils.Utils import *
- import numpy as np
- import pandas as pd
- import math
- from Factories.Models.fModels import *
- from Factories.Preprocessing.fPreprocessing import *
- from sklearn.metrics import f1_score, accuracy_score, balanced_accuracy_score
- import time
-
- class Dispatcher:
-     def __init__(self, sequential, threshold=27):
-         self.sequential = sequential
-         self.threshold = threshold
-
-     def _executeModel(self, model, X_train, y_train, X_test, y_test, predictions, runs, customMetric):
-         preds = []
-         accuracyR, b_accuracyR, f1R, customR = 0, 0, 0, 0
-         custom = None
-         exeT = 0
-
-         for j in range(runs):
-             printer.print(f"\tExecuting {j+1} run of {runs}")
-             start = time.time()
-             model.fit(X=X_train, y=y_train)
-             exeT += time.time() - start
-             y_pred = model.predict(X=X_test)
-
-             accuracyR += accuracy_score(y_test, y_pred, normalize=True)
-             b_accuracyR += balanced_accuracy_score(y_test, y_pred)
-             f1R += f1_score(y_test, y_pred, average="weighted")
-             if customMetric is not None:
-                 customR += customMetric(y_test, y_pred)
-
-             if predictions:
-                 preds = y_pred
-         accuracy = accuracyR/runs
-         b_accuracy = b_accuracyR/runs
-         f1 = f1R/runs
-         if customMetric is not None:
-             custom = customR/runs
-
-         exeT = exeT/runs # Average execution time
-         return exeT, accuracy, b_accuracy, f1, custom, preds
-
-     def _process_model(self, args):
-         model, X_train, y_train, X_test, y_test, predictions, runs, customMetric = args
-         return self._executeModel(model, X_train, y_train, X_test, y_test, predictions, runs, customMetric)
-
-     def dispatch(self, nqubits, randomstate, predictions, shots,
-                  numPredictors, numLayers, classifiers, ansatzs, backend,
-                  embeddings, features, learningRate, epochs, runs, batch,
-                  maxSamples, verbose, customMetric, customImputerNum, customImputerCat,
-                  X_train, y_train, X_test, y_test, showTable=True):
-
-         NAMES, EMBEDDINGS, ANSATZ, ACCURACY = [], [], [], []
-         B_ACCURACY, FEATURES, F1, TIME, CUSTOM = [], [], [], [], []
-
-         numClasses = len(np.unique(y_train))
-
-         # Adjust qubits if necessary
-         if (numClasses > 2**math.floor(math.log2(nqubits))):
-             printer.print("The number of qubits must exceed the number of classes and be a power of 2 to execute all circuits successfully.")
-             adjustedQubits = adjustQubits(nqubits=nqubits, numClasses=numClasses)
-             printer.print(f"New number of qubits:\t{adjustedQubits}")
-         else:
-             adjustedQubits = nqubits
-
-         # Convert input data to pandas DataFrames
-         if not isinstance(X_train, pd.DataFrame):
-             X_train = pd.DataFrame(X_train)
-         if not isinstance(X_test, pd.DataFrame):
-             X_test = pd.DataFrame(X_test)
-
-         # Preprocessing
-         prepFactory = PreprocessingFactory(nqubits)
-         sanitizer = prepFactory.GetSanitizer(customImputerCat, customImputerNum)
-         X_train = sanitizer.fit_transform(X_train)
-         X_test = sanitizer.transform(X_test)
-
-         combinations = create_combinations(classifiers=classifiers, embeddings=embeddings,
-                                            features=features, ansatzs=ansatzs)
-
-         models_to_run = []
-         for combination in combinations:
-             name, embedding, ansatz, feature = combination
-             printer.print("="*100)
-             feature = feature if feature is not None else "~"
-             printer.print(f"Model: {name} Embedding: {embedding} Ansatz:{ansatz} Features: {feature}")
-
-             model = ModelFactory().getModel(
-                 Nqubits=adjustedQubits, model=name, Embedding=embedding,
-                 Ansatz=ansatz, N_class=numClasses, backend=backend,
-                 Shots=shots, seed=randomstate, Layers=numLayers,
-                 Max_samples=maxSamples, Max_features=feature,
-                 LearningRate=learningRate, BatchSize=batch,
-                 Epoch=epochs, numPredictors=numPredictors
-             )
-
-             preprocessing = prepFactory.GetPreprocessing(ansatz=ansatz, embedding=embedding)
-             X_train_processed = preprocessing.fit_transform(X_train, y=y_train)
-             X_test_processed = preprocessing.transform(X_test)
-
-             models_to_run.append((
-                 model, X_train_processed, y_train, X_test_processed, y_test,
-                 predictions, runs, customMetric, name, embedding, ansatz, feature
-             ))
-
-         if self.sequential or backend == "Lightning.GPU" or nqubits >= self.threshold:
-             # Sequential execution
-             for model_args in models_to_run:
-                 model, X_train_p, y_train, X_test_p, y_test, preds, runs, custom_metric, \
-                     name, embedding, ansatz, feature = model_args
-
-                 exeT, accuracy, b_accuracy, f1, custom, _ = self._executeModel(
-                     model, X_train_p, y_train, X_test_p, y_test, preds, runs, custom_metric
-                 )
-
-                 self._append_results(NAMES, EMBEDDINGS, ANSATZ, ACCURACY, B_ACCURACY,
-                                      FEATURES, F1, TIME, CUSTOM, name, embedding, ansatz,
-                                      feature, accuracy, b_accuracy, f1, exeT, custom)
-
-         else:
-             # Parallel execution
-             with ThreadPoolExecutor(max_workers=min(len(models_to_run), psutil.cpu_count())) as executor:
-                 futures = []
-                 for model_args in models_to_run:
-                     model, X_train_p, y_train, X_test_p, y_test, preds, runs, custom_metric, \
-                         name, embedding, ansatz, feature = model_args
-
-                     future = executor.submit(self._executeModel, model, X_train_p, y_train,
-                                              X_test_p, y_test, preds, runs, custom_metric)
-                     futures.append((future, name, embedding, ansatz, feature))
-
-                 for future, name, embedding, ansatz, feature in futures:
-                     exeT, accuracy, b_accuracy, f1, custom, _ = future.result()
-                     self._append_results(NAMES, EMBEDDINGS, ANSATZ, ACCURACY, B_ACCURACY,
-                                          FEATURES, F1, TIME, CUSTOM, name, embedding, ansatz,
-                                          feature, accuracy, b_accuracy, f1, exeT, custom)
-
-         # Create and return results DataFrame
-         return self._create_results_dataframe(NAMES, EMBEDDINGS, ANSATZ, FEATURES,
-                                               ACCURACY, B_ACCURACY, F1, CUSTOM, TIME,
-                                               customMetric, showTable)
-
-     def _append_results(self, NAMES, EMBEDDINGS, ANSATZ, ACCURACY, B_ACCURACY,
-                         FEATURES, F1, TIME, CUSTOM, name, embedding, ansatz,
-                         feature, accuracy, b_accuracy, f1, exeT, custom):
-         NAMES.append(name)
-         EMBEDDINGS.append(embedding)
-         ANSATZ.append(ansatz)
-         ACCURACY.append(accuracy)
-         B_ACCURACY.append(b_accuracy)
-         FEATURES.append(feature)
-         F1.append(f1)
-         TIME.append(exeT)
-         CUSTOM.append(custom)
-
-     def _create_results_dataframe(self, NAMES, EMBEDDINGS, ANSATZ, FEATURES,
-                                   ACCURACY, B_ACCURACY, F1, CUSTOM, TIME,
-                                   customMetric, showTable):
-         if customMetric is None:
-             scores = pd.DataFrame({
-                 "Model": NAMES,
-                 "Embedding": EMBEDDINGS,
-                 "Ansatz": ANSATZ,
-                 "Features": FEATURES,
-                 "Accuracy": ACCURACY,
-                 "Balanced Accuracy": B_ACCURACY,
-                 "F1 Score": F1,
-                 "Time taken": TIME,
-             })
-         else:
-             scores = pd.DataFrame({
-                 "Model": NAMES,
-                 "Embedding": EMBEDDINGS,
-                 "Ansatz": ANSATZ,
-                 "Features": FEATURES,
-                 "Accuracy": ACCURACY,
-                 "Balanced Accuracy": B_ACCURACY,
-                 "F1 Score": F1,
-                 customMetric.__name__: CUSTOM,
-                 "Time taken": TIME,
-             })
-
-         if showTable:
-             print(scores.to_markdown())
-
-         return scores
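The thread-based dispatcher above follows the standard concurrent.futures recipe: submit one job per model/embedding/ansatz combination, keep each Future alongside its identifying metadata, then collect results with future.result(). Since model.fit is CPU-bound Python, the benefit of threads here is limited by the GIL except where the underlying numerical libraries release it. Below is a minimal sketch of the same submit-and-collect pattern; evaluate_combination and the job list are hypothetical stand-ins, not lazyqml code.

from concurrent.futures import ThreadPoolExecutor

def evaluate_combination(name, nqubits):
    # Stand-in for _executeModel: pretend to score one configuration.
    return name, 1.0 / nqubits

jobs = [("QSVM", 4), ("QNNTorch", 8), ("QNNBag", 8)]
with ThreadPoolExecutor(max_workers=2) as executor:
    futures = [(executor.submit(evaluate_combination, name, nq), name) for name, nq in jobs]
    for future, name in futures:
        _, score = future.result()
        print(name, score)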
lazyqml/Factories/Dispatchers/_DispatcherMultiprocessing.py
@@ -1,201 +0,0 @@
- from multiprocessing import Pool
- from Utils.Utils import *
- import numpy as np
- import pandas as pd
- import math
- from Factories.Models.fModels import *
- from Factories.Preprocessing.fPreprocessing import *
- from sklearn.metrics import f1_score, accuracy_score, balanced_accuracy_score
- import time
- import psutil
-
- class Dispatcher:
-     def __init__(self, sequential, threshold=27):
-         self.sequential = sequential
-         self.threshold = threshold
-
-     def _executeModel(self, args):
-         model_params, X_train, y_train, X_test, y_test, predictions, runs, customMetric = args
-
-         # Recreate the model in this process
-         model = ModelFactory().getModel(**model_params)
-
-         preds = []
-         accuracyR, b_accuracyR, f1R, customR = 0, 0, 0, 0
-         custom = None
-         exeT = 0
-
-         for j in range(runs):
-             start = time.time()
-             model.fit(X=X_train, y=y_train)
-             exeT += time.time() - start
-             y_pred = model.predict(X=X_test)
-
-             accuracyR += accuracy_score(y_test, y_pred, normalize=True)
-             b_accuracyR += balanced_accuracy_score(y_test, y_pred)
-             f1R += f1_score(y_test, y_pred, average="weighted")
-             if customMetric is not None:
-                 customR += customMetric(y_test, y_pred)
-
-             if predictions:
-                 preds = y_pred
-         accuracy = accuracyR/runs
-         b_accuracy = b_accuracyR/runs
-         f1 = f1R/runs
-         if customMetric is not None:
-             custom = customR/runs
-
-         exeT = exeT/runs
-         return exeT, accuracy, b_accuracy, f1, custom, preds
-
-     def dispatch(self, nqubits, randomstate, predictions, shots,
-                  numPredictors, numLayers, classifiers, ansatzs, backend,
-                  embeddings, features, learningRate, epochs, runs, batch,
-                  maxSamples, verbose, customMetric, customImputerNum, customImputerCat,
-                  X_train, y_train, X_test, y_test, showTable=True):
-
-         NAMES, EMBEDDINGS, ANSATZ, ACCURACY = [], [], [], []
-         B_ACCURACY, FEATURES, F1, TIME, CUSTOM = [], [], [], [], []
-
-         numClasses = len(np.unique(y_train))
-
-         # Adjust qubits if necessary
-         if (numClasses > 2**math.floor(math.log2(nqubits))):
-             printer.print("The number of qubits must exceed the number of classes and be a power of 2.")
-             adjustedQubits = adjustQubits(nqubits=nqubits, numClasses=numClasses)
-             printer.print(f"New number of qubits:\t{adjustedQubits}")
-         else:
-             adjustedQubits = nqubits
-
-         # Convert input data to pandas DataFrames if needed
-         if not isinstance(X_train, pd.DataFrame):
-             X_train = pd.DataFrame(X_train)
-         if not isinstance(X_test, pd.DataFrame):
-             X_test = pd.DataFrame(X_test)
-
-         # Preprocessing
-         prepFactory = PreprocessingFactory(nqubits)
-         sanitizer = prepFactory.GetSanitizer(customImputerCat, customImputerNum)
-         X_train = sanitizer.fit_transform(X_train)
-         X_test = sanitizer.transform(X_test)
-
-         combinations = create_combinations(classifiers=classifiers, embeddings=embeddings,
-                                            features=features, ansatzs=ansatzs)
-
-         execution_args = []
-         for combination in combinations:
-             name, embedding, ansatz, feature = combination
-             printer.print("="*100)
-             feature = feature if feature is not None else "~"
-             printer.print(f"Model: {name} Embedding: {embedding} Ansatz:{ansatz} Features: {feature}")
-
-             # Instead of creating the model, we'll just store its parameters
-             model_params = {
-                 'Nqubits': adjustedQubits,
-                 'model': name,
-                 'Embedding': embedding,
-                 'Ansatz': ansatz,
-                 'N_class': numClasses,
-                 'backend': backend,
-                 'Shots': shots,
-                 'seed': randomstate,
-                 'Layers': numLayers,
-                 'Max_samples': maxSamples,
-                 'Max_features': feature,
-                 'LearningRate': learningRate,
-                 'BatchSize': batch,
-                 'Epoch': epochs,
-                 'numPredictors': numPredictors
-             }
-
-             preprocessing = prepFactory.GetPreprocessing(ansatz=ansatz, embedding=embedding)
-             X_train_processed = preprocessing.fit_transform(X_train, y=y_train)
-             X_test_processed = preprocessing.transform(X_test)
-
-             execution_args.append((
-                 model_params, X_train_processed, y_train, X_test_processed, y_test,
-                 predictions, runs, customMetric, name, embedding, ansatz, feature
-             ))
-
-         if self.sequential or backend == "Lightning.GPU" or nqubits >= self.threshold:
-             # Sequential execution
-             for args in execution_args:
-                 model_params, X_train_p, y_train, X_test_p, y_test, preds, runs, \
-                     custom_metric, name, embedding, ansatz, feature = args
-
-                 exe_args = (model_params, X_train_p, y_train, X_test_p, y_test,
-                             preds, runs, custom_metric)
-                 exeT, accuracy, b_accuracy, f1, custom, _ = self._executeModel(exe_args)
-
-                 self._append_results(NAMES, EMBEDDINGS, ANSATZ, ACCURACY, B_ACCURACY,
-                                      FEATURES, F1, TIME, CUSTOM, name, embedding, ansatz,
-                                      feature, accuracy, b_accuracy, f1, exeT, custom)
-
-         else:
-             # Parallel execution using multiprocessing
-             num_processes = min(len(execution_args), psutil.cpu_count(logical=False))
-             with Pool(processes=num_processes) as pool:
-                 results = []
-                 for args in execution_args:
-                     model_params, X_train_p, y_train, X_test_p, y_test, preds, runs, \
-                         custom_metric, name, embedding, ansatz, feature = args
-
-                     exe_args = (model_params, X_train_p, y_train, X_test_p, y_test,
-                                 preds, runs, custom_metric)
-                     result = pool.apply_async(self._executeModel, (exe_args,))
-                     results.append((result, name, embedding, ansatz, feature))
-
-                 for result, name, embedding, ansatz, feature in results:
-                     exeT, accuracy, b_accuracy, f1, custom, _ = result.get()
-                     self._append_results(NAMES, EMBEDDINGS, ANSATZ, ACCURACY, B_ACCURACY,
-                                          FEATURES, F1, TIME, CUSTOM, name, embedding, ansatz,
-                                          feature, accuracy, b_accuracy, f1, exeT, custom)
-
-         return self._create_results_dataframe(NAMES, EMBEDDINGS, ANSATZ, FEATURES,
-                                               ACCURACY, B_ACCURACY, F1, CUSTOM, TIME,
-                                               customMetric, showTable)
-
-     # _append_results and _create_results_dataframe methods remain unchanged
-     def _append_results(self, NAMES, EMBEDDINGS, ANSATZ, ACCURACY, B_ACCURACY,
-                         FEATURES, F1, TIME, CUSTOM, name, embedding, ansatz,
-                         feature, accuracy, b_accuracy, f1, exeT, custom):
-         NAMES.append(name)
-         EMBEDDINGS.append(embedding)
-         ANSATZ.append(ansatz)
-         ACCURACY.append(accuracy)
-         B_ACCURACY.append(b_accuracy)
-         FEATURES.append(feature)
-         F1.append(f1)
-         TIME.append(exeT)
-         CUSTOM.append(custom)
-
-     def _create_results_dataframe(self, NAMES, EMBEDDINGS, ANSATZ, FEATURES,
-                                   ACCURACY, B_ACCURACY, F1, CUSTOM, TIME,
-                                   customMetric, showTable):
-         if customMetric is None:
-             scores = pd.DataFrame({
-                 "Model": NAMES,
-                 "Embedding": EMBEDDINGS,
-                 "Ansatz": ANSATZ,
-                 "Features": FEATURES,
-                 "Accuracy": ACCURACY,
-                 "Balanced Accuracy": B_ACCURACY,
-                 "F1 Score": F1,
-                 "Time taken": TIME,
-             })
-         else:
-             scores = pd.DataFrame({
-                 "Model": NAMES,
-                 "Embedding": EMBEDDINGS,
-                 "Ansatz": ANSATZ,
-                 "Features": FEATURES,
-                 "Accuracy": ACCURACY,
-                 "Balanced Accuracy": B_ACCURACY,
-                 "F1 Score": F1,
-                 customMetric.__name__: CUSTOM,
-                 "Time taken": TIME,
-             })
-
-         if showTable:
-             print(scores.to_markdown())
-         return scores
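The multiprocessing variant above ships model_params into the worker and rebuilds the model there ("Recreate the model in this process") because everything handed to Pool.apply_async is pickled on its way to the child process, and a plain dict of constructor arguments crosses that boundary more reliably than a live model object. Below is a minimal sketch of the rebuild-in-the-worker pattern; build_and_score and its toy threshold "model" are hypothetical stand-ins, not lazyqml code.

from multiprocessing import Pool

def build_and_score(model_params, data):
    # Rebuild the "model" (here just a decision threshold) inside the worker.
    cutoff = model_params["cutoff"]
    return sum(1 for x in data if x >= cutoff) / len(data)

if __name__ == "__main__":
    jobs = [({"cutoff": c}, [0.1, 0.4, 0.7, 0.9]) for c in (0.3, 0.5, 0.8)]
    with Pool(processes=2) as pool:
        async_results = [pool.apply_async(build_and_score, args) for args in jobs]
        print([r.get() for r in async_results])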
lazyqml/Factories/Dispatchers/_QNNBagdispatcher.py
@@ -1,2 +0,0 @@
- def executeQNNBag(combinations):
-     pass
lazyqml/Factories/Dispatchers/_QNNdispatcher.py
@@ -1,2 +0,0 @@
- def executeQNN(combinations):
-     pass