lazyqml 2.0.5__py2.py3-none-any.whl → 3.0.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. lazyqml/Factories/Circuits/AmplitudeEmbedding.py +1 -1
  2. lazyqml/Factories/Circuits/HCzRx.py +1 -1
  3. lazyqml/Factories/Circuits/HardwareEfficient.py +1 -1
  4. lazyqml/Factories/Circuits/RxEmbedding.py +1 -1
  5. lazyqml/Factories/Circuits/RyEmbedding.py +1 -1
  6. lazyqml/Factories/Circuits/RzEmbedding.py +1 -1
  7. lazyqml/Factories/Circuits/TreeTensor.py +1 -1
  8. lazyqml/Factories/Circuits/TwoLocal.py +1 -1
  9. lazyqml/Factories/Circuits/ZzEmbedding.py +1 -1
  10. lazyqml/Factories/Circuits/fCircuits.py +10 -10
  11. lazyqml/Factories/Dispatchers/Dispatcher.py +264 -85
  12. lazyqml/Factories/Models/Hybrid.py +460 -0
  13. lazyqml/Factories/Models/QNNBag.py +6 -6
  14. lazyqml/Factories/Models/QNNTorch.py +8 -8
  15. lazyqml/Factories/Models/QSVM.py +3 -3
  16. lazyqml/Factories/Models/_QNNPennylane.py +4 -4
  17. lazyqml/Factories/Models/fModels.py +4 -4
  18. lazyqml/Factories/Preprocessing/Pca.py +2 -2
  19. lazyqml/Factories/Preprocessing/Sanitizer.py +2 -2
  20. lazyqml/Factories/Preprocessing/fPreprocessing.py +5 -24
  21. lazyqml/Global/globalEnums.py +3 -1
  22. lazyqml/Interfaces/iAnsatz.py +1 -1
  23. lazyqml/Utils/Utils.py +203 -84
  24. lazyqml/Utils/Validator.py +4 -7
  25. lazyqml/__init__.py +1 -1
  26. lazyqml/lazyqml.py +54 -49
  27. lazyqml-3.0.0.dist-info/LICENSE +21 -0
  28. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/METADATA +48 -35
  29. lazyqml-3.0.0.dist-info/RECORD +40 -0
  30. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/WHEEL +1 -1
  31. lazyqml/.lazyqmlP.py +0 -293
  32. lazyqml/.lazyqmlVote.py +0 -303
  33. lazyqml/Factories/Circuits/_Qkernel.py +0 -16
  34. lazyqml/Factories/Circuits/_Qnn.py +0 -17
  35. lazyqml/Factories/Dispatchers/DispatcherCV.py +0 -143
  36. lazyqml/Factories/Dispatchers/DispatcherNumba.py +0 -226
  37. lazyqml/Factories/Dispatchers/_Dispatcher.py +0 -188
  38. lazyqml/Factories/Dispatchers/_DispatcherMultiprocessing.py +0 -201
  39. lazyqml/Factories/Dispatchers/_QNNBagdispatcher.py +0 -2
  40. lazyqml/Factories/Dispatchers/_QNNdispatcher.py +0 -2
  41. lazyqml/Factories/Dispatchers/_QSVMdispatcher.py +0 -112
  42. lazyqml/Factories/Dispatchers/__Dispatcher.py +0 -193
  43. lazyqml/Factories/Preprocessing/_PcaAmp.py +0 -22
  44. lazyqml/Factories/Preprocessing/_PcaTree.py +0 -22
  45. lazyqml/Factories/Preprocessing/_PcaTreeAmp.py +0 -22
  46. lazyqml/Lanza copy.sh +0 -32
  47. lazyqml/Lanza.sh +0 -21
  48. lazyqml/mem.py +0 -85
  49. lazyqml-2.0.5.dist-info/RECORD +0 -56
  50. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/AUTHORS.rst +0 -0
  51. /lazyqml-2.0.5.dist-info/LICENSE → /lazyqml-3.0.0.dist-info/LICENSE copy +0 -0
  52. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/entry_points.txt +0 -0
  53. {lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/top_level.txt +0 -0
{lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/METADATA RENAMED
@@ -1,18 +1,19 @@
  Metadata-Version: 2.1
  Name: lazyqml
- Version: 2.0.5
+ Version: 3.0.0
  Summary: LazyQML benchmarking utility to test quantum machine learning models.
- Author-email: Diego García Vega <garciavdiego@uniovi.es>, Fernando Álvaro Plou Llorente <ploufernando@uniovi.es>, Alejandro Leal Castaño <lealcalejandro@uniovi.es>
+ Author-email: QHPC Group <qhpcgroup@gmail.com>
  License: MIT License
- Project-URL: Homepage, https://github.com/DiegoGV-Uniovi/lazyqml
+ Project-URL: Homepage, https://github.com/QHPC-SP-Research-Lab/LazyQML
  Keywords: lazyqml
  Classifier: Intended Audience :: Science/Research
  Classifier: License :: OSI Approved :: MIT License
  Classifier: Natural Language :: English
- Classifier: Programming Language :: Python :: 3.9
- Requires-Python: >=3.8
+ Classifier: Programming Language :: Python :: 3.10
+ Requires-Python: >=3.9
  Description-Content-Type: text/markdown
  License-File: LICENSE
+ License-File: LICENSE copy
  License-File: AUTHORS.rst
  Requires-Dist: wheel
  Requires-Dist: tabulate
@@ -22,36 +23,68 @@ Requires-Dist: torchvision
  Requires-Dist: scipy
  Requires-Dist: scikit-learn
  Requires-Dist: PennyLane
- Requires-Dist: PennyLane-Lightning
- Requires-Dist: PennyLane-Lightning-GPU
- Requires-Dist: custatevec-cu12
+ Requires-Dist: PennyLane_Lightning
+ Requires-Dist: PennyLane_Lightning_GPU
+ Requires-Dist: custatevec_cu12
  Requires-Dist: ucimlrepo
  Requires-Dist: pydantic
  Requires-Dist: psutil
  Requires-Dist: pandas
  Requires-Dist: joblib
+ Requires-Dist: gputil
  Provides-Extra: all
- Requires-Dist: lazyqml[extra] ; extra == 'all'
+ Requires-Dist: lazyqml[extra]; extra == "all"
  Provides-Extra: extra
- Requires-Dist: pandas ; extra == 'extra'
+ Requires-Dist: pandas; extra == "extra"

  # LazyQML


- [![image](https://img.shields.io/pypi/v/lazyqml.svg)](https://pypi.python.org/pypi/lazyqml)
+ [![image](https://img.shields.io/badge/pypi-%23ececec.svg?style=for-the-badge&logo=pypi&logoColor=1f73b7)](https://pypi.python.org/pypi/lazyqml)
+ ![GitHub Actions](https://img.shields.io/badge/github%20actions-%232671E5.svg?style=for-the-badge&logo=githubactions&logoColor=white)
+ ![NumPy](https://img.shields.io/badge/numpy-%23013243.svg?style=for-the-badge&logo=numpy&logoColor=white)
+ ![Pandas](https://img.shields.io/badge/pandas-%23150458.svg?style=for-the-badge&logo=pandas&logoColor=white)
+ ![PyTorch](https://img.shields.io/badge/PyTorch-%23EE4C2C.svg?style=for-the-badge&logo=PyTorch&logoColor=white)
+ ![scikit-learn](https://img.shields.io/badge/scikit--learn-%23F7931E.svg?style=for-the-badge&logo=scikit-learn&logoColor=white)
+ ![nVIDIA](https://img.shields.io/badge/cuda-000000.svg?style=for-the-badge&logo=nVIDIA&logoColor=green)
+ <img src="https://assets.cloud.pennylane.ai/pennylane_website/generic/logo.svg" alt="Pennylane Logo" style="background-color: white; padding: 2px;" />
+ ![Linux](https://img.shields.io/badge/Linux-FCC624?style=for-the-badge&logo=linux&logoColor=black)

+ <!-- ![Pennylane](https://assets.cloud.pennylane.ai/pennylane_website/generic/logo.svg) -->

- **pLazyQML: A parallel package for efficient execution of QML models on classical computers**
+ pLazyQML, a software package designed to accelerate, automate, and streamline experimentation with quantum machine learning models on classical computers. pLazyQML reduces the complexity and time required for developing and testing quantum-enhanced machine learning models.
+ ## Usage
+ ```python
+ from lazyqml.lazyqml import QuantumClassifier
+ from lazyqml.Global.globalEnums import *
+ from sklearn.datasets import load_breast_cancer, load_iris
+ from sklearn.model_selection import train_test_split
+ # Load data
+ data = load_iris()
+ X = data.data
+ y = data.target

+ # Split data
+ X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=.3,random_state =123)
+
+ # Initialize LazyClassifier
+ classifier = QuantumClassifier(nqubits={4,8,16},verbose=True,sequential=False,backend=Backend.lightningQubit)
+
+ # Fit and predict
+ classifier.fit(X_train=X_train,y_train=y_train,X_test=X_test,y_test=y_test)
+ ```

+ ## License & Compatibility
  - Free software: MIT License
+ - This Python package is only compatible with Linux systems.
+ - Hardware acceleration is only enabled using CUDA-compatible devices.
  ## Quantum and High Performance Computing (QHPC) - University of Oviedo
  - José Ranilla Pastor - ranilla@uniovi.es
  - Elías Fernández Combarro - efernandezca@uniovi.es
- - Diego García Vega - garciavdiego@uniovi.es
+ - Diego García Vega - diegogarciavega@gmail.com
  - Fernando Álvaro Plou Llorente - ploufernando@uniovi.es
  - Alejandro Leal Castaño - lealcalejandro@uniovi.es
- - Group - https://qhpc.grupos.uniovi.es/
+ - Group - https://qhpc.uniovi.es

  ## Parameters:
  - **verbose** _bool, optional (default=False)_: If set to True, detailed messages about the training process will be displayed, helping users to monitor the progress and debug if necessary.
@@ -75,7 +108,7 @@ Requires-Dist: pandas ; extra == 'extra'
  - **threshold** _int, optional (default=22)_: Integer value used to delimit from which number of qubits the internal operations of the models start to be parallelized. This helps optimize performance for larger quantum circuits.
  - **cores** _int, optional (default=-1)_: Number of processes to be created by the dispatcher to run the selected models. Each process will be allocated as many CPU cores as possible for parallel execution.
  ## Functions:
- - **fit** _(X_train, Y_train, X_test, Y_test, showTable=True)_: Fit Classification algorithms to X_train and y_train, predict and score on X_test, y_test.
+ - **fit** _(X, y, test\_size, showTable=True)_: Fit Classification algorithms to X and y using hold-out, predict and score on test set (test_size).
  If the dimensions of the training vectors are not compatible with the different models, a
  PCA transformation will be used in order to reduce the dimensionality to a compatible space.
  All categories must be in the training data if there are new categories in the test date the
@@ -86,24 +119,4 @@ Requires-Dist: pandas ; extra == 'extra'

  - **leave_one_out** _(X, y, showTable=True)_: Perform leave-one-out cross-validation on the given dataset and model. This method splits the dataset into multiple train-test splits using LeaveOneOut,
  fits the model on the training set, evaluates it on the validation set, and aggregates the results.
- - **glue_hybrid** _(X,y,model,showTable=True)_:This function takes both the training and test data, along with a user-provided torch model. It connects the given model to a fully connected layer, which acts as a bridge between the classical neural network and the selected Quantum Neural Networks (QNNs). The combined model is then used to train on the data and make predictions on the test set, leveraging the strengths of both classical and quantum approaches.
- ## Usage:
- ```python
- from lazyqml.lazyqml import QuantumClassifier
- from lazyqml.Global.globalEnums import *
- from sklearn.datasets import load_breast_cancer, load_iris
- from sklearn.model_selection import train_test_split
- # Load data
- data = load_iris()
- X = data.data
- y = data.target

- # Split data
- X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=.3,random_state =123)
-
- # Initialize LazyClassifier
- classifier = QuantumClassifier(nqubits={4,8,16},verbose=True,sequential=False,backend=Backend.lightningQubit)
-
- # Fit and predict
- classifier.fit(X_train=X_train,y_train=y_train,X_test=X_test,y_test=y_test)
- ```
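The hunks above record an API change worth spelling out: in 2.0.5, `fit` required a pre-made split (`X_train`, `y_train`, `X_test`, `y_test`), while the 3.0.0 README documents a hold-out interface, `fit(X, y, test_size, showTable=True)`, alongside `leave_one_out(X, y, showTable=True)`. The sketch below is illustrative only and assumes those documented signatures; it is not taken from the package itself.

```python
from sklearn.datasets import load_iris

from lazyqml.lazyqml import QuantumClassifier
from lazyqml.Global.globalEnums import Backend

# Load a small example dataset
data = load_iris()
X, y = data.data, data.target

classifier = QuantumClassifier(nqubits={4, 8}, verbose=False,
                               backend=Backend.lightningQubit)

# 3.0.0-style hold-out fit: the train/test split is handled internally via test_size
classifier.fit(X=X, y=y, test_size=0.3, showTable=True)

# Leave-one-out cross-validation, as documented in the README
classifier.leave_one_out(X=X, y=y, showTable=True)
```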
lazyqml-3.0.0.dist-info/RECORD ADDED
@@ -0,0 +1,40 @@
+ lazyqml/__init__.py,sha256=AO5qs-YuLME2DjkZikanGaXV4iBUKGlyBKDEUtbeyIg,242
+ lazyqml/lazyqml.py,sha256=7MD8Jy7t9gEFZ2BluRBu8W1c-pFb-Elm2s9uHAWteqI,14703
+ lazyqml/Factories/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lazyqml/Factories/Circuits/AmplitudeEmbedding.py,sha256=P49rPc9aEK558yx8WYyzRGWfLuLCoJr_OjdOvWqDTbo,638
+ lazyqml/Factories/Circuits/HCzRx.py,sha256=VQ4bCJ7_iAJ5d2mSRbpLqbEV3U8azinvdX2oZnXoA1M,1157
+ lazyqml/Factories/Circuits/HardwareEfficient.py,sha256=f8qNl7fGcvIOUKVbwQ2Uey_4J7W8QZAqAE5auqU4huo,1804
+ lazyqml/Factories/Circuits/RxEmbedding.py,sha256=aVT6nuqJNJXUCGRMCf6gh3Wp3x4ZmPfd6b7XrqrqV50,623
+ lazyqml/Factories/Circuits/RyEmbedding.py,sha256=rJ4NUSHNaY-TO1d9RVoDBY9Yv56-1xIA6I5nUyosdK0,623
+ lazyqml/Factories/Circuits/RzEmbedding.py,sha256=6RoWWA4DTrObGRsy7ffR2a9vrK7cPBJE2uAGphrY3yc,691
+ lazyqml/Factories/Circuits/TreeTensor.py,sha256=IXZrbzfGS6TDt-EAyCUJ1yYluaie2nrZyEmZnOUedL0,1290
+ lazyqml/Factories/Circuits/TwoLocal.py,sha256=s73of6_vossAn5VLKjIQw0AaiHYTi0jh9cpmLBLg-8U,962
+ lazyqml/Factories/Circuits/ZzEmbedding.py,sha256=irIV05Nn95NbTxb9OYt0qR2mXxPDfKRhNTw-1a-lss8,1062
+ lazyqml/Factories/Circuits/fCircuits.py,sha256=YzEQiO-4XKJ_XoRU9CHybnI5Stqh7bmeZCeMVLs4kKg,1660
+ lazyqml/Factories/Dispatchers/Dispatcher.py,sha256=ndKqkD-9pmZ99arnWcADQMrSNjIrsR_KiHmKDi8RXRU,14454
+ lazyqml/Factories/Models/Hybrid.py,sha256=R9Ztzhbut4A_87z7viUlH5RtboFfmRkxYqxO9ABkXkc,17740
+ lazyqml/Factories/Models/QNNBag.py,sha256=UKTE8FpVkcxBBbE9MnL_cWi07idXUaF6sGCsKZ2OJzE,6560
+ lazyqml/Factories/Models/QNNTorch.py,sha256=mJsr2uGaQ8teEB3gsF5aUZ8j_IlJSdOZsdnvLVJanLs,6588
+ lazyqml/Factories/Models/QSVM.py,sha256=Jc-PHL8yn7VBrXEt4i44jsbsQHKF-FgwXvSWWwutK_A,3225
+ lazyqml/Factories/Models/_QNNPennylane.py,sha256=cn7lSZ0tEps61xleIeI7oWyGxeotiwQc74dYVFe-rIc,3380
+ lazyqml/Factories/Models/fModels.py,sha256=5Nsx7UFH_De4f5pMvx0D2giUIgKgA4q29g7_NwLybxc,1342
+ lazyqml/Factories/Preprocessing/Pca.py,sha256=_Hd5cZWTuurmtiyBSK1TalmIIr9ZTEhxwo-DDNL5KRw,963
+ lazyqml/Factories/Preprocessing/Sanitizer.py,sha256=OmBEvjzKbozwQ_4HrkwYjkRBtKag0zlAmEWvp8WG7Os,1109
+ lazyqml/Factories/Preprocessing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lazyqml/Factories/Preprocessing/fPreprocessing.py,sha256=YyCpVUDxe4RFXUvJFV4U-U1Cwl6wMPuzurJwYmpLnNw,818
+ lazyqml/Global/globalEnums.py,sha256=Qc4Xep-LnubsxUvb8ZW5h-XZVjLRFXWfv040NXIt4HY,962
+ lazyqml/Interfaces/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lazyqml/Interfaces/iAnsatz.py,sha256=HzHRRg7OqKOJAW-I-huDdxnFwaTZtP7953VmranOzC8,156
+ lazyqml/Interfaces/iCircuit.py,sha256=L-GbUQmJ2s0uc5AjJj4zIruWP7XSycpG-C8xI4_wnxc,115
+ lazyqml/Interfaces/iModel.py,sha256=W_yYoZd7hS8ZKGvNAyl-waJ6f7F0kIM6zrg9WqyPt_I,176
+ lazyqml/Interfaces/iPreprocessing.py,sha256=O9f2tCnWTbjkHi4_XKaPWsE2SWKsdI1pj2lCyBWih9o,255
+ lazyqml/Utils/Utils.py,sha256=0p9T0ObTTb5_GfIxKNhjufvn9tbgSs-NdEvDrfbwZsQ,10026
+ lazyqml/Utils/Validator.py,sha256=6Ox2A5WAV5GYPuFfB873mpEqCHSmhhSwFV_LGI-ZJ5s,4622
+ lazyqml-3.0.0.dist-info/AUTHORS.rst,sha256=Y_bDRslOAz5wcAYrTAnjDlmlW-51LfVF0Xwf09TbW3Y,245
+ lazyqml-3.0.0.dist-info/LICENSE,sha256=42X2ZTCkjjhUks41WOjPmwX8sbCfgf431zzdCne6gqE,1079
+ lazyqml-3.0.0.dist-info/LICENSE copy,sha256=y9EYvUN_l9ZWDuiVcxHE1NWbfy_HR9Z-8G19719ynbQ,1077
+ lazyqml-3.0.0.dist-info/METADATA,sha256=Kv_3g2Eb76x3PqbzsFM5r3vYvRvwdyQh9bDO3buCZDM,9615
+ lazyqml-3.0.0.dist-info/WHEEL,sha256=pxeNX5JdtCe58PUSYP9upmc7jdRPgvT0Gm9kb1SHlVw,109
+ lazyqml-3.0.0.dist-info/entry_points.txt,sha256=I0WR08yVIeXjSa8XBSGZ9SsZtM8uMvKwZOdU1qQajao,45
+ lazyqml-3.0.0.dist-info/top_level.txt,sha256=x2ffpytT-NeXmC7YaZLSQNMLK0pLfUiRmGOqwNbyjZE,8
+ lazyqml-3.0.0.dist-info/RECORD,,
{lazyqml-2.0.5.dist-info → lazyqml-3.0.0.dist-info}/WHEEL RENAMED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.4.0)
+ Generator: setuptools (75.6.0)
  Root-Is-Purelib: true
  Tag: py2-none-any
  Tag: py3-none-any
lazyqml/.lazyqmlP.py DELETED
@@ -1,293 +0,0 @@
- import numpy as np
- import pandas as pd
- import sys
- from tabulate import tabulate
- from pydantic import BaseModel, Field, model_validator, field_validator, ValidationError, conset
- from pydantic.config import ConfigDict
- from typing import List, Callable, Optional, Set
- from typing_extensions import Annotated, Set
- from Factories.Preprocessing.fPreprocessing import PreprocessingFactory
- from Global.globalEnums import *
- from Utils.Utils import *
- from Utils.Validator import *
- from Factories.Dispatchers.Dispatcher import *
- from sklearn.impute import SimpleImputer
- from ucimlrepo import fetch_ucirepo
- from sklearn.preprocessing import LabelEncoder
-
- class QuantumClassifier(BaseModel):
-     """
-     This module helps in fitting to all the classification algorithms that are available in Scikit-learn
-     Parameters
-     ----------
-     verbose : bool, optional (default=False)
-         Verbose True for showing every training message during the fit.
-     ignoreWarnings : bool, optional (default=True)
-         When set to True, the warning related to algorigms that are not able to run are ignored.
-     customMetric : function, optional (default=None)
-         When function is provided, models are evaluated based on the custom evaluation metric provided.
-     customImputerNum : function, optional (default=None)
-         When function is provided, models are imputed based on the custom numeric imputer provided.
-     customImputerCat : function, optional (default=None)
-         When function is provided, models are imputed based on the custom categorical imputer provided.
-     prediction : bool, optional (default=False)
-         When set to True, the predictions of all the models models are returned as a pandas dataframe.
-     classifiers : list of strings, optional (default=["all"])
-         When function is provided, trains the chosen classifier(s) ["all", "qsvm", "qnn", "qnnbag"].
-     embeddings : list of strings, optional (default=["all"])
-         When function is provided, trains the chosen embeddings(s) ["all", "amplitude_embedding", "ZZ_embedding", "rx_embedding", "rz_embedding", "ry_embedding"].
-     ansatzs : list of strings, optional (default=["all"])
-         When function is provided, trains the chosen ansatzs(s) ["all", "HPzRx", "tree_tensor", "two_local", "hardware_efficient"].
-     randomSate : int, optional (default=1234)
-         This integer is used as a seed for the repeatability of the experiments.
-     nqubits : int, optional (default=8)
-         This integer is used for defining the number of qubits of the quantum circuits that the models will use.
-     numLayers : int, optional (default=5)
-         The number of layers that the Quantum Neural Network (QNN) models will use, is set to 5 by default.
-     numPredictors : int, optional (default=10)
-         The number of different predictoras that the Quantum Neural Networks with Bagging (QNN_Bag) will use, is set to 10 by default.
-     learningRate : int, optional (default=0.01)
-         The parameter that will be used for the optimization process of all the Quantum Neural Networks (QNN) in the gradient descent, is set to 0.01 by default.
-     epochs : int, optional (default=100)
-         The number of complete passes that will be done over the dataset during the fitting of the models.
-     runs : int, optional (default=1)
-         The number of training runs that will be done with the Quantum Neural Network (QNN) models.
-     maxSamples : float, optiona (default=1.0)
-         A floating point number between 0 and 1.0 that indicates the percentage of the dataset that will be used for each Quantum Neural Network with Bagging (QNN_Bag).
-
-     Examples
-     --------
-     >>> from lazyqml.supervised import QuantumClassifier
-     >>> from sklearn.datasets import load_breast_cancer
-     >>> from sklearn.model_selection import train_test_split
-     >>> data = load_breast_cancer()
-     >>> X = data.data
-     >>> y= data.target
-     >>> X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=.5,random_state =123)
-     >>> clf = QuantumClassifier(verbose=0,ignore_warnings=True, customMetric=None)
-     >>> models,predictions = clf.fit(X_train, X_test, y_train, y_test)
-     >>> model_dictionary = clf.provide_models(X_train,X_test,y_train,y_test)
-     >>> models
-     | Model | Embedding | Ansatz | Accuracy | Balanced Accuracy | ROC AUC | F1 Score | Time taken |
-     |:------------|:--------------------|:-------------------|-----------:|--------------------:|----------:|-----------:|-------------:|
-     | qsvm | amplitude_embedding | ~ | 0.807018 | 0.782339 | 0.782339 | 0.802547 | 43.7487 |
-     | qnn | amplitude_embedding | hardware_efficient | 0.77193 | 0.743218 | 0.743218 | 0.765533 | 7.92101 |
-     | qnn | ry_embedding | hardware_efficient | 0.71345 | 0.689677 | 0.689677 | 0.709295 | 8.00107 |
-     .....................................................................................................................................
-     #####################################################################################################################################
-     .....................................................................................................................................
-     | qnn | ZZ_embedding | two_local | 0.461988 | 0.455954 | 0.455954 | 0.467481 | 2.13294 |
-     """
-     model_config = ConfigDict(strict=True)
-
-     # nqubits: Annotated[int, Field(gt=0)] = 8
-     nqubits: Annotated[Set[int], Field(description="Set of qubits, each must be greater than 0")]
-     randomstate: int = 1234
-     predictions: bool = False
-     ignoreWarnings: bool = True
-     sequential: bool = False
-     numPredictors: Annotated[int, Field(gt=0)] = 10
-     numLayers: Annotated[int, Field(gt=0)] = 5
-     classifiers: Annotated[Set[Model], Field(min_items=1)] = {Model.ALL}
-     ansatzs: Annotated[Set[Ansatzs], Field(min_items=1)] = {Ansatzs.ALL}
-     embeddings: Annotated[Set[Embedding], Field(min_items=1)] = {Embedding.ALL}
-     backend: Backend = Backend.lightningQubit
-     features: Annotated[Set[float], Field(min_items=1)] = {0.3, 0.5, 0.8}
-     learningRate: Annotated[float, Field(gt=0)] = 0.01
-     epochs: Annotated[int, Field(gt=0)] = 100
-     shots: Annotated[int, Field(gt=0)] = 1
-     runs: Annotated[int, Field(gt=0)] = 1
-     batchSize: Annotated[int, Field(gt=0)] = 8
-     threshold: Annotated[int, Field(gt=0)] = 26
-     maxSamples: Annotated[float, Field(gt=0, le=1)] = 1.0
-     verbose: bool = False
-     customMetric: Optional[Callable] = None
-     customImputerNum: Optional[Any] = None
-     customImputerCat: Optional[Any] = None
-     batch: Optional[bool] = True
-
-     @field_validator('nqubits', mode='before')
-     def check_nqubits_positive(cls, value):
-         if not isinstance(value, set):
-             raise TypeError('nqubits must be a set of integers')
-
-         if any(v <= 0 for v in value):
-             raise ValueError('Each value in nqubits must be greater than 0')
-
-         return value
-
-     @field_validator('features')
-     def validate_features(cls, v):
-         if not all(0 < x <= 1 for x in v):
-             raise ValueError("All features must be greater than 0 and less than or equal to 1")
-         return v
-
-     @field_validator('customMetric')
-     def validate_custom_metric_field(cls, metric):
-         if metric is None:
-             return None # Allow None as a valid value
-
-         # Check the function signature
-         sig = inspect.signature(metric)
-         params = list(sig.parameters.values())
-
-         if len(params) < 2 or params[0].name != 'y_true' or params[1].name != 'y_pred':
-             raise ValueError(
-                 f"Function {metric.__name__} does not have the required signature. "
-                 f"Expected first two arguments to be 'y_true' and 'y_pred'."
-             )
-
-         # Test the function by passing dummy arguments
-         y_true = np.array([0, 1, 1, 0]) # Example ground truth labels
-         y_pred = np.array([0, 1, 0, 0]) # Example predicted labels
-
-         try:
-             result = metric(y_true, y_pred)
-         except Exception as e:
-             raise ValueError(f"Function {metric.__name__} raised an error during execution: {e}")
-
-         # Ensure the result is a scalar (int or float)
-         if not isinstance(result, (int, float)):
-             raise ValueError(
-                 f"Function {metric.__name__} returned {result}, which is not a scalar value."
-             )
-
-         return metric
-
-     @field_validator('customImputerCat', 'customImputerNum')
-     def check_preprocessor_methods(cls, preprocessor):
-         # Check if preprocessor is an instance of a class
-         if not isinstance(preprocessor, object):
-             raise ValueError(
-                 f"Expected an instance of a class, but got {type(preprocessor).__name__}."
-             )
-
-         # Ensure the object has 'fit' and 'transform' methods
-         if not (hasattr(preprocessor, 'fit') and hasattr(preprocessor, 'transform')):
-             raise ValueError(
-                 f"Object {preprocessor.__class__.__name__} does not have required methods 'fit' and 'transform'."
-             )
-
-         # Optionally check if the object has 'fit_transform' method
-         if not hasattr(preprocessor, 'fit_transform'):
-             raise ValueError(
-                 f"Object {preprocessor.__class__.__name__} does not have 'fit_transform' method."
-             )
-
-         # Create dummy data for testing the preprocessor methods
-         X_dummy = np.array([[1, 2], [3, 4], [5, 6]]) # Example dummy data
-
-         try:
-             # Ensure the object can fit on data
-             preprocessor.fit(X_dummy)
-         except Exception as e:
-             raise ValueError(f"Object {preprocessor.__class__.__name__} failed to fit: {e}")
-
-         try:
-             # Ensure the object can transform data
-             transformed = preprocessor.transform(X_dummy)
-         except Exception as e:
-             raise ValueError(f"Object {preprocessor.__class__.__name__} failed to transform: {e}")
-
-         # Check the type of the transformed result
-         if not isinstance(transformed, (np.ndarray, list)):
-             raise ValueError(
-                 f"Object {preprocessor.__class__.__name__} returned {type(transformed)} from 'transform', expected np.ndarray or list."
-             )
-
-         return preprocessor
-
-     def fit(self, X_train, y_train, X_test, y_test,showTable=True):
-
-
-         printer.set_verbose(verbose=self.verbose)
-         # Validation model to ensure input parameters are DataFrames and sizes match
-         FitParamsValidator(
-             train_x=X_train,
-             train_y=y_train,
-             test_x=X_test,
-             test_y=y_test
-         )
-         printer.print("Validation successful, fitting the model...")
-
-         # Fix seed
-         fixSeed(self.randomstate)
-         d = Dispatcher(sequential=self.sequential,threshold=self.threshold)
-         d.dispatch(nqubits=self.nqubits,randomstate=self.randomstate,predictions=self.predictions,numPredictors=self.numPredictors,numLayers=self.numLayers,classifiers=self.classifiers,ansatzs=self.ansatzs,backend=self.backend,embeddings=self.embeddings,features=self.features,learningRate=self.learningRate,epochs=self.epochs,runs=self.runs,maxSamples=self.maxSamples,verbose=self.verbose,customMetric=self.customMetric,customImputerNum=self.customImputerNum,customImputerCat=self.customImputerCat, X_train=X_train,y_train=y_train, X_test=X_test, y_test=y_test,shots=self.shots,showTable=showTable,batch=self.batchSize,auto=self.batch)
-
-     def repeated_cross_validation(self, X, y, n_splits=5, n_repeats=10, showTable=True):
-         pass
-
-     def leave_one_out(self, X, y, showTable=True):
-         pass
-
- if __name__ == '__main__':
-
-     Rotationals_family = sys.argv[1].lower() == 'true'
-     Batch_auto = sys.argv[2].lower() == 'true'
-     Sequential = sys.argv[3].lower() == 'true'
-     Node = sys.argv[4].lower()
-
-
-     if Rotationals_family:
-         embeddings = {Embedding.RX,Embedding.RY,Embedding.RZ}
-     else:
-         embeddings = {Embedding.ZZ,Embedding.AMP}
-
-
-
-
-     from sklearn.datasets import load_iris
-     from sklearn.model_selection import train_test_split
-
-     if Node == "slave4":
-
-         dataset="iris"
-
-         # Load data
-         data = load_iris()
-         X = data.data
-         y = data.target
-
-         # Split data
-         X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=.4,random_state =1234)
-     elif Node == "slave5":
-         dataset="monks"
-         # fetch dataset
-         monk_s_problems = fetch_ucirepo(id=70)
-
-         # data (as pandas dataframes)
-         X = monk_s_problems.data.features
-         y = monk_s_problems.data.targets
-
-         # Split data
-         X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=.4,random_state =1234)
-     elif Node == "slave3":
-         dataset="tic tac toe"
-         tic_tac_toe_endgame = fetch_ucirepo(id=101)
-
-         # data (as pandas dataframes)
-         X = tic_tac_toe_endgame.data.features
-         y = tic_tac_toe_endgame.data.targets
-
-         # Assume the features are in a DataFrame format
-         X = pd.DataFrame(X)
-
-         # Initialize the Label Encoder
-         label_encoder = LabelEncoder()
-
-         # Apply label encoding to each column (if you have more than one categorical feature)
-         for column in X.columns:
-             X[column] = label_encoder.fit_transform(X[column])
-
-         # Step 3: Split data
-         X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=1234)
-
-     print(f"PARAMETERS\nEmbeddings: {embeddings}\tBatch Auto: {Batch_auto}\tSequential: {Sequential}\tNode: {Node}\tDataset: {dataset}")
-
-     classifier = QuantumClassifier(nqubits={4,8,16},classifiers={Model.QSVM},embeddings=embeddings,features={1.0},verbose=True,sequential=Sequential,backend=Backend.lightningQubit,batch=Batch_auto)
-
-     start = time.time()
-
-     print(f"TOTAL TIME: {time.time()-start}s\t PARALLEL: {not Sequential}")
-
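A side note on the deleted `.lazyqmlP.py` above: its `validate_custom_metric_field` validator only accepts callables whose first two parameters are named `y_true` and `y_pred` and whose return value is a scalar, probing them with small dummy label arrays. The sketch below is a minimal metric that would satisfy that check; it is illustrative only and assumes a `customMetric` argument is still accepted wherever this validation logic is reused.

```python
import numpy as np
from sklearn.metrics import f1_score

def weighted_f1(y_true, y_pred):
    """Custom metric in the shape the deleted validator expects:
    first two parameters named y_true/y_pred, scalar return value."""
    return float(f1_score(y_true, y_pred, average="weighted"))

# The validator exercised candidate metrics with dummy labels like these
print(weighted_f1(np.array([0, 1, 1, 0]), np.array([0, 1, 0, 0])))
```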