scikit-learn-intelex 2024.1.0__py38-none-manylinux1_x86_64.whl → 2024.2.0__py38-none-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of scikit-learn-intelex might be problematic.

Files changed (40)
  1. {scikit_learn_intelex-2024.1.0.dist-info → scikit_learn_intelex-2024.2.0.dist-info}/METADATA +2 -2
  2. {scikit_learn_intelex-2024.1.0.dist-info → scikit_learn_intelex-2024.2.0.dist-info}/RECORD +38 -34
  3. sklearnex/cluster/dbscan.py +3 -3
  4. sklearnex/{preview/linear_model → covariance}/__init__.py +3 -3
  5. sklearnex/covariance/incremental_covariance.py +130 -0
  6. sklearnex/covariance/tests/test_incremental_covariance.py +143 -0
  7. sklearnex/dispatcher.py +19 -18
  8. sklearnex/ensemble/_forest.py +5 -10
  9. sklearnex/linear_model/__init__.py +1 -2
  10. sklearnex/linear_model/linear.py +3 -10
  11. sklearnex/{preview/linear_model → linear_model}/logistic_regression.py +19 -38
  12. sklearnex/linear_model/tests/test_logreg.py +70 -5
  13. sklearnex/neighbors/__init__.py +1 -1
  14. sklearnex/neighbors/_lof.py +167 -0
  15. sklearnex/neighbors/knn_classification.py +6 -9
  16. sklearnex/neighbors/knn_regression.py +6 -8
  17. sklearnex/neighbors/knn_unsupervised.py +5 -7
  18. sklearnex/neighbors/tests/test_neighbors.py +12 -11
  19. sklearnex/preview/__init__.py +1 -1
  20. sklearnex/preview/cluster/k_means.py +3 -8
  21. sklearnex/preview/covariance/covariance.py +46 -12
  22. sklearnex/preview/decomposition/pca.py +3 -5
  23. sklearnex/spmd/__init__.py +1 -0
  24. sklearnex/spmd/covariance/__init__.py +19 -0
  25. sklearnex/spmd/covariance/covariance.py +21 -0
  26. sklearnex/spmd/linear_model/__init__.py +2 -1
  27. sklearnex/spmd/linear_model/logistic_regression.py +21 -0
  28. sklearnex/svm/nusvc.py +5 -6
  29. sklearnex/svm/nusvr.py +3 -4
  30. sklearnex/svm/svc.py +5 -6
  31. sklearnex/svm/svr.py +3 -4
  32. sklearnex/tests/test_memory_usage.py +1 -4
  33. sklearnex/tests/test_monkeypatch.py +33 -20
  34. sklearnex/tests/test_n_jobs_support.py +71 -9
  35. sklearnex/tests/test_patching.py +19 -5
  36. sklearnex/neighbors/lof.py +0 -436
  37. sklearnex/preview/linear_model/tests/test_preview_logistic_regression.py +0 -59
  38. {scikit_learn_intelex-2024.1.0.dist-info → scikit_learn_intelex-2024.2.0.dist-info}/LICENSE.txt +0 -0
  39. {scikit_learn_intelex-2024.1.0.dist-info → scikit_learn_intelex-2024.2.0.dist-info}/WHEEL +0 -0
  40. {scikit_learn_intelex-2024.1.0.dist-info → scikit_learn_intelex-2024.2.0.dist-info}/top_level.txt +0 -0
{scikit_learn_intelex-2024.1.0.dist-info → scikit_learn_intelex-2024.2.0.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: scikit-learn-intelex
- Version: 2024.1.0
+ Version: 2024.2.0
  Summary: Intel(R) Extension for Scikit-learn is a seamless way to speed up your Scikit-learn application.
  Home-page: https://github.com/intel/scikit-learn-intelex
  Author: Intel Corporation
@@ -31,7 +31,7 @@ Classifier: Topic :: Software Development
  Requires-Python: >=3.7
  Description-Content-Type: text/markdown
  License-File: LICENSE.txt
- Requires-Dist: daal4py (==2024.1.0)
+ Requires-Dist: daal4py (==2024.2.0)
  Requires-Dist: scikit-learn (>=0.22)


{scikit_learn_intelex-2024.1.0.dist-info → scikit_learn_intelex-2024.2.0.dist-info}/RECORD RENAMED
@@ -3,30 +3,34 @@ sklearnex/__main__.py,sha256=Le9BJq6aLEGSSoZwJLsOADxsV89ynBfA8BcoJHk9F24,1921
  sklearnex/_config.py,sha256=6WS3UuS4-0DxIJyGn7yQMosj-mGkLybLQrg3W7dED5o,3928
  sklearnex/_device_offload.py,sha256=J0tZqj6tfvIFonWR01PadtTLgloQk_QEfXeCoqEvJlk,7710
  sklearnex/_utils.py,sha256=EV4jC3plVdndsgrfPBsJZTzggrRdYWLwOpoIRWtTXt4,3812
- sklearnex/dispatcher.py,sha256=9prbr6QrUWjc8wq1OyTxhuFEIKlTR-Qup9ZMfNszBPE,12884
+ sklearnex/dispatcher.py,sha256=89QQ508iMt6qxoCIHt4woLvz_-KBOR6UvvhJJeC77hE,12878
  sklearnex/basic_statistics/__init__.py,sha256=NA5RGlwcp27UEeCgz0ngzYYd3MAIRpxlTy86uhM2-RE,821
  sklearnex/basic_statistics/basic_statistics.py,sha256=j5jBZ3DgXjoyjCBEFfYQHyl3LgLXEWWFnNHwInThdZw,796
  sklearnex/cluster/__init__.py,sha256=TsDzdbKzEubaP86iY8a7mSW0ZQzgjzGYuPkhc5lZmlQ,853
- sklearnex/cluster/dbscan.py,sha256=o3ZJVsvQXGG5y2bUja7Xd0V57VQGmB93FWDbKVo20kw,6674
+ sklearnex/cluster/dbscan.py,sha256=EegSXIFHKNhaKLoe_G8dsC5t2SXRdu3tDzsHbcubdDM,6706
  sklearnex/cluster/k_means.py,sha256=1QKcFUQcnycu8kSD8uYSaDIuedHbxZsV_gvUfcEwVAM,806
  sklearnex/cluster/tests/test_dbscan.py,sha256=AgFoKwXFVyLKLvEtJVs0qlbBs1Ci3PcRft7f-6_ENOU,1464
  sklearnex/cluster/tests/test_kmeans.py,sha256=H3a9NSHRRSASo0Eceo44Kjq6GwEoMqMzcubaNt1s1QQ,1213
+ sklearnex/covariance/__init__.py,sha256=_c86wiKDGBH50ofN-_tn7lO4M2YVEWO5AHhJIfimUDk,859
+ sklearnex/covariance/incremental_covariance.py,sha256=3s9gmLP48DPhlhnFcg2JB7RQ2sliSZdNroJkv8-1sIA,4526
+ sklearnex/covariance/tests/test_incremental_covariance.py,sha256=JIa4p-1_RYvU2ZVLx27iz0OKp0Nwrl9sYCsT0Dlyi2I,5402
  sklearnex/decomposition/__init__.py,sha256=RJBYDWyvnn5DA-j5r4IqVdXLp6H4mdPySsSjnXhlv-U,805
  sklearnex/decomposition/pca.py,sha256=68ksLxTP5fMOUhRmiIq9QNm0YzQanBNzxsq-zA8DKaY,809
  sklearnex/decomposition/tests/test_pca.py,sha256=aP4gxjML38CFpknwNVIkZLQCc8t2rqYGlWVo03vsMfE,1146
  sklearnex/doc/third-party-programs.txt,sha256=qA1XbOqYkMRnp29p8VxXjfcH0kHE8NSO5s2heea83-8,21773
  sklearnex/ensemble/__init__.py,sha256=2xB2KfG7l7uJv0p3kfVhrs4KLM174SCsigZhYqwSYAA,1035
- sklearnex/ensemble/_forest.py,sha256=Q3foDFaNmEHVeYjK8dIvP_-6GvSEV7dYPTXCrVN0e6A,70441
+ sklearnex/ensemble/_forest.py,sha256=nw_aUdgyjxKWc6yZ-8DBaqNqDODhx5uEy13GbpM7C18,70561
  sklearnex/ensemble/tests/test_forest.py,sha256=KoETKE1sSpKgp38s9bepAujJjcG21eFX5RyYINcHCUo,4516
  sklearnex/glob/__main__.py,sha256=--FAjkhh5E6a1TFOGu6_BM3MHe78jP8oSpl0xKiRtTI,2531
  sklearnex/glob/dispatcher.py,sha256=IEEPhAOCVzC2JDFvYtijbiPAbaUY5RrCAhFLRjMMe1w,3018
- sklearnex/linear_model/__init__.py,sha256=97jj68r_V3JmsO8sZJt9BXlXazAPAzAz0E56dsNu4Sk,1100
+ sklearnex/linear_model/__init__.py,sha256=5XZDZh8R0SmT8D8ZtSjW7-MKRO7l1jOZ8CUnt_OzHe4,1047
  sklearnex/linear_model/coordinate_descent.py,sha256=uZOHIKfFlHoMlNXZCh2MAnZE30fRZlmtcF7UsZQ3Vq4,822
- sklearnex/linear_model/linear.py,sha256=see-YysI8U1ilseHlerfcb97BFmenaubW-3BmDy0ixI,13870
+ sklearnex/linear_model/linear.py,sha256=ed7pNKKnRkwa-wC0haUCOGHQoPkA4AuFhKNMzmRL6Fw,13832
  sklearnex/linear_model/logistic_path.py,sha256=Nq3JPXSzE4bjamnP3gGQtsMKks2v7s6bSiuYRnFqrRw,849
+ sklearnex/linear_model/logistic_regression.py,sha256=ezW717qpPM4EC6uvmKbvxZZZwkooLuc8mfddAu5ebJM,12547
  sklearnex/linear_model/ridge.py,sha256=0oxlM5McYYvl0KxK9OIGJKM6lOADuFSPTdfx-efJNTI,810
  sklearnex/linear_model/tests/test_linear.py,sha256=li9LLWgap6YCwvNiHQ9yHduvUIsK1lpXfuVNR3dLwig,3200
- sklearnex/linear_model/tests/test_logreg.py,sha256=KO4240ACvhLsVHC_H09lhj4APUWPxmimHBE1JoY6TB8,1175
+ sklearnex/linear_model/tests/test_logreg.py,sha256=iH6pxRJ5Nh6RzO_ohFLlt-TpJpQmzKh2QMU81SnPwv4,3346
  sklearnex/manifold/__init__.py,sha256=3FU5fY1YvHAyNeSlUHsZfPnAbvlAUtZpum0Obmok2KE,809
  sklearnex/manifold/t_sne.py,sha256=U6F_dGB6taVuEJvTnVBWD-sri3x3m0Khu3HI4wXidhg,805
  sklearnex/manifold/tests/test_tsne.py,sha256=SNqe1yg45mL4qIsxpX5hDpBsIyrv9r__6wfrerjp3yU,1063
@@ -37,61 +41,61 @@ sklearnex/metrics/tests/test_metrics.py,sha256=Nwfz8UV4V4fKLLY7f9P9eg8uY09xLXaFx
  sklearnex/model_selection/__init__.py,sha256=64045Y-nzLDBepO6IRjt88LhL2DM3KdvpCF2vvj_RpA,842
  sklearnex/model_selection/split.py,sha256=qjmy8sRf_QEG8LhT0ivn_tICMCYmt7ffZZV1-rLQnko,824
  sklearnex/model_selection/tests/test_model_selection.py,sha256=3kZIq8kba8SW93DBWXupoRymNStR1_mGGYSErQRnwME,1338
- sklearnex/neighbors/__init__.py,sha256=fyxRt3wosR6Tg_QYWOOt477jLisT2iZl6CzfTL1y9ro,1077
+ sklearnex/neighbors/__init__.py,sha256=_GMJ2L-6YechRYI2fOFwHjy0kebUncE81z4FmdXUlb8,1078
+ sklearnex/neighbors/_lof.py,sha256=HddPA9VdHEKCENr260qEAWoaB3KdqVYqHE-BssSuWPY,6605
  sklearnex/neighbors/common.py,sha256=iia-EUIRUIohDCIGpHbVx2PeDlwUvz4Mj1Tn169jidA,10781
- sklearnex/neighbors/knn_classification.py,sha256=z6-GgVnOnkyypZGyf0wFOev2Hc8iWy62gfr4dMFFdXM,11023
- sklearnex/neighbors/knn_regression.py,sha256=Ns_VPJ1ismXcPeJkUp8JxiHe-Y3QqI0d5HZuub7owkQ,9917
- sklearnex/neighbors/knn_unsupervised.py,sha256=MaapzNDA1wy2ZTuCeqpkKDEMBItrbMoW0p3JniCRCsA,7590
- sklearnex/neighbors/lof.py,sha256=XNnqrOnpIC5apeg6nToxeGqjQNIEp7sotflmm9iF4UY,16264
- sklearnex/neighbors/tests/test_neighbors.py,sha256=EAGF7Opgf-xWUAWYc6OdDgW5-2XsdkzLK_a35IX4Se4,3565
- sklearnex/preview/__init__.py,sha256=hZfIgTkkkUVaQ-SKaqI-S_SiXCkUzCUYxpSnbrhhEJU,813
+ sklearnex/neighbors/knn_classification.py,sha256=C0jqL9qRQwt31JTIxjjWQWJuiy_D1I5Am1_W6ek8beY,11077
+ sklearnex/neighbors/knn_regression.py,sha256=7ihpIl5SKSYDGvXtsUC0vNaOTj6_NNpXAAsu3uiQuaQ,9978
+ sklearnex/neighbors/knn_unsupervised.py,sha256=Stw63vAKiaHnPQ2cfnXWD1Omf-QssT4BqhFPCMmyVCs,7620
+ sklearnex/neighbors/tests/test_neighbors.py,sha256=s4jip1Ntrhp5Zu9-pHVbeIoNdNFsq03ABY-N-iF_UL8,3437
+ sklearnex/preview/__init__.py,sha256=1QcbV6xCSP7QCXRxYLVPc_b4lxE0cQiyTrMm4xOnosM,797
  sklearnex/preview/cluster/__init__.py,sha256=FONzOuTGAb5NwdfFhLco99P_VccRk9NBkDHU3EKuAIs,794
  sklearnex/preview/cluster/_common.py,sha256=bgpzijxgyrAtdNCCHyCNsZ-N70KQYNwMuoCTNdUna6o,2718
- sklearnex/preview/cluster/k_means.py,sha256=hG_hq8dfHM8B97L9T6fPU5TuQB7d3URWHfnB1cskT_8,13022
+ sklearnex/preview/cluster/k_means.py,sha256=jSuU8E6r4fdbbBnxBpWp4ybBa62kCnbBx7zDXyUr0Cs,13007
  sklearnex/preview/covariance/__init__.py,sha256=DPYTk8lwcVphtwU8J3CyUYH8Uz6Zr0Uz4S6nn-RY5iM,825
- sklearnex/preview/covariance/covariance.py,sha256=FXcRqdrHAhRNsyD6hG4a63ZxHMOZqdOS9yAqzfPY1LI,3614
+ sklearnex/preview/covariance/covariance.py,sha256=LrNrGP62FEhTSBVaO-EOYAaq-Rszc1VS2B2IqMgh4oo,4938
  sklearnex/preview/covariance/tests/test_covariance.py,sha256=GYI4bMIhGnjmOXpt8J7R0JQmpm9eOvDjIphugRa4kD8,2140
  sklearnex/preview/decomposition/__init__.py,sha256=uRenwBGf7hHQqwAVYbBw3clUQB_HWUqJGOAKTuCnrcM,805
- sklearnex/preview/decomposition/pca.py,sha256=KkbK3NwBCvE_0eQryI3zFiwlXPiNdj4dCQd2ywgSIn8,14341
+ sklearnex/preview/decomposition/pca.py,sha256=S8g5GhLdnUAb1FifNx6gnrwA8AWv8ddZtLY1Er83BkY,14342
  sklearnex/preview/decomposition/tests/test_preview_pca.py,sha256=xcllHM-jDIq33rAWTeh_gjhS2qfCNbUIAI5KeLPA8aY,1790
- sklearnex/preview/linear_model/__init__.py,sha256=Xr9Gk6mCq794GDLA2UlJPiLovOhZAx6rA5Bu123L-Rg,832
- sklearnex/preview/linear_model/logistic_regression.py,sha256=KIqpboN15BFV4BOyEokc5LF9eiqwBetlx2i27WBNIK0,13377
- sklearnex/preview/linear_model/tests/test_preview_logistic_regression.py,sha256=UjHXFhlea2P7feUKC64uHGNOhJpEaM2EZoAK0JJbz3I,2422
- sklearnex/spmd/__init__.py,sha256=8cxQy-oCFy1TJto0qoRf4lt98siPx2c-YV99YC-sk6s,871
+ sklearnex/spmd/__init__.py,sha256=ChQy2kEWlo4KGvs0RnbPoPEVhdgl8URV099B1rZtF5Y,889
  sklearnex/spmd/basic_statistics/__init__.py,sha256=NA5RGlwcp27UEeCgz0ngzYYd3MAIRpxlTy86uhM2-RE,821
  sklearnex/spmd/basic_statistics/basic_statistics.py,sha256=_dQ9mhVYxeuChATEpAmHpXDpgW3YvtK1qrG-kLr2MtI,886
  sklearnex/spmd/cluster/__init__.py,sha256=qBBfrCHh6_82EROLbu54XKk7SmmRwS1XJyCj0zwkoUw,1029
  sklearnex/spmd/cluster/dbscan.py,sha256=23YNzPhx4MZijU0md-E3ZkHpTkhUh5cmtS3loHe-KhI,1824
  sklearnex/spmd/cluster/kmeans.py,sha256=Rnb9tr9LXVto5vCAumk7ZJfa9BYYDhdD1qUWL-QK5bY,868
+ sklearnex/spmd/covariance/__init__.py,sha256=5xeL1REMIxCv5M1ya99GGKaVjctUngboZ3uXgxcZ04o,823
+ sklearnex/spmd/covariance/covariance.py,sha256=_oIlr1W1vqDHqIPnsCb04HcBCen5oBHQr5-_n9OSvIA,884
  sklearnex/spmd/decomposition/__init__.py,sha256=dBh0ZMIiaqdf3DKbt8FWNB2K9Iacs395m8OxaDFQg_M,784
  sklearnex/spmd/decomposition/pca.py,sha256=CUrsVD2jae-A9H8RB_emza_fe82CwnFa5PEy0fW_EZ8,871
  sklearnex/spmd/ensemble/__init__.py,sha256=B3yi7hWoVogMIiw0QRT_x5atsAFS-OO72YPLGeUQJ8M,873
  sklearnex/spmd/ensemble/forest.py,sha256=ao6lyzcxoRW-RG9xIYwtDFyM7JIjlF8wmQKpOv_oSRQ,3113
- sklearnex/spmd/linear_model/__init__.py,sha256=eCJGleo00O6c280G97i18KNmSvi6uwX7wM1ZN5JMqhw,819
+ sklearnex/spmd/linear_model/__init__.py,sha256=WwqCr2DOyUnkSIHlP0yq0UI2yFDgSl5MHV7wu3QGJtA,893
  sklearnex/spmd/linear_model/linear_model.py,sha256=7QPCIQTWKBiZBTDZZbpZXi-REgxQCfRMt6rHPJAnc5E,883
+ sklearnex/spmd/linear_model/logistic_regression.py,sha256=q_HkfWcg0RgFbk2t9FeV0ZY28HHAOtkGEnUj4tLuwt4,885
  sklearnex/spmd/neighbors/__init__.py,sha256=S16sH8N_18LN_8AgC_wGK5EDSNyuN-F-gI6GVlaiWVE,906
  sklearnex/spmd/neighbors/neighbors.py,sha256=SiKAS_RVt34MUcGytBS5pHnI_5vFNxJn8jqt1MOhDh8,940
  sklearnex/svm/__init__.py,sha256=f3e4ZFwZfx6MsXsn94VK1xVm6mWKT5XCiHczo6zNyAQ,1057
  sklearnex/svm/_common.py,sha256=Dt1Iyz1g04zOW6hn9cHa9ruzM_MHAIq0ZEEIxh5s7nI,7167
- sklearnex/svm/nusvc.py,sha256=0-18XPzyhxQpD1nqVaUS-cymdnqZDCqZbCznD-oLr_U,8983
- sklearnex/svm/nusvr.py,sha256=BJiQYQV5-YS5P0DZTh3k-vvbGuu-GWZI3QBUsuxLMak,5215
- sklearnex/svm/svc.py,sha256=FL5GLMh82nhqUfaWjbAivghXq5iM9V_AVtkp_MAhCnI,10263
- sklearnex/svm/svr.py,sha256=dsVkOGsJ5qqx7uaWkkc5Dv7zwYaZrC1mUgcPQOadq7s,5219
+ sklearnex/svm/nusvc.py,sha256=Bvs_FmC1CYH23tXyrQE3Ti1h3BqK0YeX-_PBTZMRM0k,9008
+ sklearnex/svm/nusvr.py,sha256=XHlDAGwnx1NVkkt8c9EUST8zVRLQY7Mwu335TPCcuRk,5237
+ sklearnex/svm/svc.py,sha256=4f-vJlPGeAcquz7nkSCeu0LJTTXCbdU2M54HkT49TeQ,10288
+ sklearnex/svm/svr.py,sha256=fmYi0dghOmmyFFVI59COX9-tyouQnSDfHIbs8GY8AHs,5241
  sklearnex/svm/tests/test_svm.py,sha256=Ru-aGNGCsRJts7SEEYbnKcVqUx-DqPyUtw-hEoMVpW8,4190
  sklearnex/tests/_models_info.py,sha256=xhjvnU3TvQg8J5Cih2hphWAOSsT8DnKmCyYbtwa0Qvs,4785
  sklearnex/tests/test_config.py,sha256=SnSJjxAAysISDyC3bYKSJiRHStkB9X-yjLeF11LpRog,1372
- sklearnex/tests/test_memory_usage.py,sha256=-9S5EY9Ivy6WRW18IJEY1uyJxo9073GwXHybnFrJULc,7381
- sklearnex/tests/test_monkeypatch.py,sha256=D1ATlQtrNAtpHE7zmIxl7Mv41HxmRDRFAnjBmkUDGW8,8233
- sklearnex/tests/test_n_jobs_support.py,sha256=fa_XVNbBm5YQIguz5mgiQM8jmf2pQ7IwE8mgutyAbxk,1242
+ sklearnex/tests/test_memory_usage.py,sha256=lwm63gSyRR82n3LGBdsophU_NvZK5RHkxAoTDZ2AcWI,7309
+ sklearnex/tests/test_monkeypatch.py,sha256=wmtEeDNGoiPBlAh4Vmts86eFQLk8Wbzjbj6Busf6V3o,8663
+ sklearnex/tests/test_n_jobs_support.py,sha256=ynfCSdCMnR3yEq1YEf_cilVD7zSe0sS-ZQ9jC0hHo8M,3903
  sklearnex/tests/test_parallel.py,sha256=bMu22noUvGiDX4oyKIHPiOEoBP9lRQQUq6wq6ZD730,1776
- sklearnex/tests/test_patching.py,sha256=6H0Um6N5qhD5OZ875HhAcKPFXM81les5XWCCsaNyMb8,3759
+ sklearnex/tests/test_patching.py,sha256=IySMpMdWVgoAZgs1cRKvdJeb8RXElFwjjNdHcE4jJz0,4247
  sklearnex/tests/test_run_to_run_stability_tests.py,sha256=4HDOeJruA1EDILbyQJtsHFmEXC0D1upSHuOT-KyTlEc,14008
  sklearnex/tests/utils/_launch_algorithms.py,sha256=pJT5tAW9rWvk7GT37R-B0-e8SLz8e9FSZw8yu4LWNJ4,3724
  sklearnex/utils/__init__.py,sha256=I8mbJQ3Zsm_F3sCLAhJQb7tUrG30kVsQ-wZoqA8vDdA,842
  sklearnex/utils/parallel.py,sha256=VBcS-KUdyq7XpJUN6ygmNjyWtYLroghbvCxQ8nVU3YI,2085
  sklearnex/utils/validation.py,sha256=fjfhQiKnBQnD7LCBlacMyvsrhGnlMLRXk5Q69uoZIP4,827
- scikit_learn_intelex-2024.1.0.dist-info/LICENSE.txt,sha256=7micbUpzQXphq9e_2oL7PpZcvoXzPuQHIDEXyKXC81s,10797
- scikit_learn_intelex-2024.1.0.dist-info/METADATA,sha256=qYwrcOCj5B6HvCitweNkQDzI5qgtww7X6MdAeKdLElo,12449
- scikit_learn_intelex-2024.1.0.dist-info/WHEEL,sha256=KUw5v0OkIti_FPHYLEMiqSBmUFpuC4n7PHRMpplWaA4,107
- scikit_learn_intelex-2024.1.0.dist-info/top_level.txt,sha256=kzKChSWGJEYFmdj5PwE63HNuP_PVOhWfD32ytH9rL9Q,10
- scikit_learn_intelex-2024.1.0.dist-info/RECORD,,
+ scikit_learn_intelex-2024.2.0.dist-info/LICENSE.txt,sha256=7micbUpzQXphq9e_2oL7PpZcvoXzPuQHIDEXyKXC81s,10797
+ scikit_learn_intelex-2024.2.0.dist-info/METADATA,sha256=nNiD7x2RPhuhzEKH-Hg0-iOk3kqAUlLd3f--6KmLP-c,12449
+ scikit_learn_intelex-2024.2.0.dist-info/WHEEL,sha256=KUw5v0OkIti_FPHYLEMiqSBmUFpuC4n7PHRMpplWaA4,107
+ scikit_learn_intelex-2024.2.0.dist-info/top_level.txt,sha256=kzKChSWGJEYFmdj5PwE63HNuP_PVOhWfD32ytH9rL9Q,10
+ scikit_learn_intelex-2024.2.0.dist-info/RECORD,,
sklearnex/cluster/dbscan.py CHANGED
@@ -22,7 +22,8 @@ from scipy import sparse as sp
  from sklearn.cluster import DBSCAN as sklearn_DBSCAN
  from sklearn.utils.validation import _check_sample_weight

- from daal4py.sklearn._utils import control_n_jobs, run_with_n_jobs, sklearn_check_version
+ from daal4py.sklearn._n_jobs_support import control_n_jobs
+ from daal4py.sklearn._utils import sklearn_check_version
  from onedal.cluster import DBSCAN as onedal_DBSCAN

  from .._device_offload import dispatch, wrap_output_data
@@ -45,7 +46,7 @@ class BaseDBSCAN(ABC):
          self.n_features_in_ = self._onedal_estimator.n_features_in_


- @control_n_jobs
+ @control_n_jobs(decorated_methods=["fit"])
  class DBSCAN(sklearn_DBSCAN, BaseDBSCAN):
      __doc__ = sklearn_DBSCAN.__doc__

@@ -83,7 +84,6 @@ class DBSCAN(sklearn_DBSCAN, BaseDBSCAN):
          self.p = p
          self.n_jobs = n_jobs

-     @run_with_n_jobs
      def _onedal_fit(self, X, y, sample_weight=None, queue=None):
          onedal_params = {
              "eps": self.eps,
sklearnex/{preview/linear_model → covariance}/__init__.py RENAMED
@@ -1,5 +1,5 @@
  # ===============================================================================
- # Copyright 2023 Intel Corporation
+ # Copyright 2024 Intel Corporation
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -14,6 +14,6 @@
  # limitations under the License.
  # ===============================================================================

- from .logistic_regression import LogisticRegression
+ from .incremental_covariance import IncrementalEmpiricalCovariance

- __all__ = ["LogisticRegression"]
+ __all__ = ["IncrementalEmpiricalCovariance"]
sklearnex/covariance/incremental_covariance.py ADDED
@@ -0,0 +1,130 @@
+ # ===============================================================================
+ # Copyright 2024 Intel Corporation
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ===============================================================================
+
+ import numpy as np
+ from sklearn.utils import check_array, gen_batches
+
+ from daal4py.sklearn._n_jobs_support import control_n_jobs
+ from onedal._device_offload import support_usm_ndarray
+ from onedal.covariance import (
+     IncrementalEmpiricalCovariance as onedal_IncrementalEmpiricalCovariance,
+ )
+
+
+ @control_n_jobs(decorated_methods=["partial_fit"])
+ class IncrementalEmpiricalCovariance:
+     """
+     Incremental estimator for covariance.
+     Allows to compute empirical covariance estimated by maximum
+     likelihood method if data are splitted into batches.
+
+     Parameters
+     ----------
+     batch_size : int, default=None
+         The number of samples to use for each batch. Only used when calling
+         ``fit``. If ``batch_size`` is ``None``, then ``batch_size``
+         is inferred from the data and set to ``5 * n_features``, to provide a
+         balance between approximation accuracy and memory consumption.
+
+     Attributes
+     ----------
+     location_ : ndarray of shape (n_features,)
+         Estimated location, i.e. the estimated mean.
+
+     covariance_ : ndarray of shape (n_features, n_features)
+         Estimated covariance matrix
+     """
+
+     _onedal_incremental_covariance = staticmethod(onedal_IncrementalEmpiricalCovariance)
+
+     def __init__(self, batch_size=None):
+         self._need_to_finalize = False  # If True then finalize compute should
+         # be called to obtain covariance_ or location_ from partial compute data
+         self.batch_size = batch_size
+
+     def _onedal_finalize_fit(self):
+         assert hasattr(self, "_onedal_estimator")
+         self._onedal_estimator.finalize_fit()
+         self._need_to_finalize = False
+
+     def _onedal_partial_fit(self, X, queue):
+         onedal_params = {
+             "method": "dense",
+             "bias": True,
+         }
+         if not hasattr(self, "_onedal_estimator"):
+             self._onedal_estimator = self._onedal_incremental_covariance(**onedal_params)
+         self._onedal_estimator.partial_fit(X, queue)
+         self._need_to_finalize = True
+
+     @property
+     def covariance_(self):
+         if self._need_to_finalize:
+             self._onedal_finalize_fit()
+         return self._onedal_estimator.covariance_
+
+     @property
+     def location_(self):
+         if self._need_to_finalize:
+             self._onedal_finalize_fit()
+         return self._onedal_estimator.location_
+
+     @support_usm_ndarray()
+     def partial_fit(self, X, queue=None):
+         """
+         Incremental fit with X. All of X is processed as a single batch.
+
+         Parameters
+         ----------
+         X : array-like of shape (n_samples, n_features)
+             Training data, where `n_samples` is the number of samples and
+             `n_features` is the number of features.
+
+         Returns
+         -------
+         self : object
+             Returns the instance itself.
+         """
+         X = check_array(X, dtype=[np.float64, np.float32])
+         self._onedal_partial_fit(X, queue)
+         return self
+
+     def fit(self, X, queue=None):
+         """
+         Fit the model with X, using minibatches of size batch_size.
+
+         Parameters
+         ----------
+         X : array-like of shape (n_samples, n_features)
+             Training data, where `n_samples` is the number of samples and
+             `n_features` is the number of features.
+
+         Returns
+         -------
+         self : object
+             Returns the instance itself.
+         """
+         n_samples, n_features = X.shape
+         if self.batch_size is None:
+             batch_size_ = 5 * n_features
+         else:
+             batch_size_ = self.batch_size
+         for batch in gen_batches(n_samples, batch_size_):
+             X_batch = X[batch]
+             self.partial_fit(X_batch, queue=queue)
+
+         self._onedal_finalize_fit()
+         return self
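
A minimal usage sketch for the new estimator, pieced together from the class above and the tests that follow; the sample data is illustrative, and computation stays on the host since no SYCL queue is passed:

import numpy as np
from sklearnex.covariance import IncrementalEmpiricalCovariance

rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 10))

# Feed the data batch by batch; covariance_ and location_ are finalized
# lazily the first time they are read.
est = IncrementalEmpiricalCovariance()
for X_batch in np.array_split(X, 4):
    est.partial_fit(X_batch)

print(est.location_)    # per-feature means
print(est.covariance_)  # biased (maximum-likelihood) estimate, cf. np.cov(X.T, bias=1)

# Alternatively, let fit() do the batching with an explicit batch_size:
est2 = IncrementalEmpiricalCovariance(batch_size=250).fit(X)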
sklearnex/covariance/tests/test_incremental_covariance.py ADDED
@@ -0,0 +1,143 @@
+ # ===============================================================================
+ # Copyright 2024 Intel Corporation
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ===============================================================================
+
+ import numpy as np
+ import pytest
+ from numpy.testing import assert_allclose
+
+ from onedal.tests.utils._dataframes_support import (
+     _convert_to_dataframe,
+     get_dataframes_and_queues,
+ )
+
+
+ @pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_sklearnex_partial_fit_on_gold_data(dataframe, queue, dtype):
+     from sklearnex.covariance import IncrementalEmpiricalCovariance
+
+     X = np.array([[0, 1], [0, 1]])
+     X = X.astype(dtype)
+     X_split = np.array_split(X, 2)
+     inccov = IncrementalEmpiricalCovariance()
+
+     for i in range(2):
+         X_split_df = _convert_to_dataframe(
+             X_split[i], sycl_queue=queue, target_df=dataframe
+         )
+         result = inccov.partial_fit(X_split_df)
+
+     expected_covariance = np.array([[0, 0], [0, 0]])
+     expected_means = np.array([0, 1])
+
+     assert_allclose(expected_covariance, result.covariance_)
+     assert_allclose(expected_means, result.location_)
+
+     X = np.array([[1, 2], [3, 6]])
+     X = X.astype(dtype)
+     X_split = np.array_split(X, 2)
+     inccov = IncrementalEmpiricalCovariance()
+
+     for i in range(2):
+         X_split_df = _convert_to_dataframe(
+             X_split[i], sycl_queue=queue, target_df=dataframe
+         )
+         result = inccov.partial_fit(X_split_df)
+
+     expected_covariance = np.array([[1, 2], [2, 4]])
+     expected_means = np.array([2, 4])
+
+     assert_allclose(expected_covariance, result.covariance_)
+     assert_allclose(expected_means, result.location_)
+
+
+ @pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+ @pytest.mark.parametrize("batch_size", [2, 4])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_sklearnex_fit_on_gold_data(dataframe, queue, batch_size, dtype):
+     from sklearnex.covariance import IncrementalEmpiricalCovariance
+
+     X = np.array([[0, 1, 2, 3], [0, -1, -2, -3], [0, 1, 2, 3], [0, 1, 2, 3]])
+     X = X.astype(dtype)
+     X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+     inccov = IncrementalEmpiricalCovariance(batch_size=batch_size)
+
+     result = inccov.fit(X_df)
+
+     expected_covariance = np.array(
+         [[0, 0, 0, 0], [0, 0.75, 1.5, 2.25], [0, 1.5, 3, 4.5], [0, 2.25, 4.5, 6.75]]
+     )
+     expected_means = np.array([0, 0.5, 1, 1.5])
+
+     assert_allclose(expected_covariance, result.covariance_)
+     assert_allclose(expected_means, result.location_)
+
+
+ @pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+ @pytest.mark.parametrize("num_batches", [2, 4, 6, 8, 10])
+ @pytest.mark.parametrize("row_count", [100, 1000, 2000])
+ @pytest.mark.parametrize("column_count", [10, 100, 200])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_sklearnex_partial_fit_on_random_data(
+     dataframe, queue, num_batches, row_count, column_count, dtype
+ ):
+     from sklearnex.covariance import IncrementalEmpiricalCovariance
+
+     seed = 77
+     gen = np.random.default_rng(seed)
+     X = gen.uniform(low=-0.3, high=+0.7, size=(row_count, column_count))
+     X = X.astype(dtype)
+     X_split = np.array_split(X, num_batches)
+     inccov = IncrementalEmpiricalCovariance()
+
+     for i in range(num_batches):
+         X_split_df = _convert_to_dataframe(
+             X_split[i], sycl_queue=queue, target_df=dataframe
+         )
+         result = inccov.partial_fit(X_split_df)
+
+     expected_covariance = np.cov(X.T, bias=1)
+     expected_means = np.mean(X, axis=0)
+
+     assert_allclose(expected_covariance, result.covariance_, atol=1e-6)
+     assert_allclose(expected_means, result.location_, atol=1e-6)
+
+
+ @pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+ @pytest.mark.parametrize("num_batches", [2, 4, 6, 8, 10])
+ @pytest.mark.parametrize("row_count", [100, 1000, 2000])
+ @pytest.mark.parametrize("column_count", [10, 100, 200])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_sklearnex_fit_on_random_data(
+     dataframe, queue, num_batches, row_count, column_count, dtype
+ ):
+     from sklearnex.covariance import IncrementalEmpiricalCovariance
+
+     seed = 77
+     gen = np.random.default_rng(seed)
+     X = gen.uniform(low=-0.3, high=+0.7, size=(row_count, column_count))
+     X = X.astype(dtype)
+     X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+     batch_size = row_count // num_batches
+     inccov = IncrementalEmpiricalCovariance(batch_size=batch_size)
+
+     result = inccov.fit(X_df)
+
+     expected_covariance = np.cov(X.T, bias=1)
+     expected_means = np.mean(X, axis=0)
+
+     assert_allclose(expected_covariance, result.covariance_, atol=1e-6)
+     assert_allclose(expected_means, result.location_, atol=1e-6)
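
The gold-data expectations above are the biased (maximum-likelihood) covariance, matching the bias: True option the estimator passes to oneDAL; a quick NumPy check of the [[1, 2], [3, 6]] case:

import numpy as np

X = np.array([[1.0, 2.0], [3.0, 6.0]])
mean = X.mean(axis=0)                         # [2., 4.]  -> expected location_
cov_mle = (X - mean).T @ (X - mean) / len(X)  # divide by n, not n - 1
print(cov_mle)                                # [[1. 2.] [2. 4.]] -> expected covariance_
assert np.allclose(cov_mle, np.cov(X.T, bias=1))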
sklearnex/dispatcher.py CHANGED
@@ -69,6 +69,7 @@ def get_patch_map():
      from .ensemble import RandomForestClassifier as RandomForestClassifier_sklearnex
      from .ensemble import RandomForestRegressor as RandomForestRegressor_sklearnex
      from .linear_model import LinearRegression as LinearRegression_sklearnex
+     from .linear_model import LogisticRegression as LogisticRegression_sklearnex
      from .neighbors import KNeighborsClassifier as KNeighborsClassifier_sklearnex
      from .neighbors import KNeighborsRegressor as KNeighborsRegressor_sklearnex
      from .neighbors import LocalOutlierFactor as LocalOutlierFactor_sklearnex
@@ -80,9 +81,6 @@ def get_patch_map():
          EmpiricalCovariance as EmpiricalCovariance_sklearnex,
      )
      from .preview.decomposition import PCA as PCA_sklearnex
-     from .preview.linear_model import (
-         LogisticRegression as LogisticRegression_sklearnex,
-     )
      from .svm import SVC as SVC_sklearnex
      from .svm import SVR as SVR_sklearnex
      from .svm import NuSVC as NuSVC_sklearnex
@@ -119,21 +117,6 @@ def get_patch_map():
          ]
      ]

-     # LogisticRegression
-     mapping.pop("logisticregression")
-     mapping.pop("log_reg")
-     mapping["log_reg"] = [
-         [
-             (
-                 linear_model_module,
-                 "LogisticRegression",
-                 LogisticRegression_sklearnex,
-             ),
-             None,
-         ]
-     ]
-     mapping["logisticregression"] = mapping["log_reg"]
-
      # DBSCAN
      mapping.pop("dbscan")
      mapping["dbscan"] = [[(cluster_module, "DBSCAN", DBSCAN_sklearnex), None]]
@@ -161,6 +144,24 @@ def get_patch_map():
      ]
      mapping["linearregression"] = mapping["linear"]

+     # Logistic Regression
+
+     mapping.pop("logisticregression")
+     mapping.pop("log_reg")
+     mapping.pop("logistic")
+     mapping.pop("_logistic_regression_path")
+     mapping["log_reg"] = [
+         [
+             (
+                 linear_model_module,
+                 "LogisticRegression",
+                 LogisticRegression_sklearnex,
+             ),
+             None,
+         ]
+     ]
+     mapping["logisticregression"] = mapping["log_reg"]
+
      # kNN
      mapping.pop("knn_classifier")
      mapping.pop("kneighborsclassifier")
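
With this dispatcher change, the accelerated LogisticRegression is registered from sklearnex.linear_model instead of the preview namespace, so a plain patch_sklearn() call now picks it up. A hedged sketch (the dataset and the printed module name are illustrative):

from sklearnex import patch_sklearn

patch_sklearn()  # LogisticRegression no longer needs preview mode in 2024.2

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=500, n_features=20, random_state=0)
clf = LogisticRegression(max_iter=200).fit(X, y)
print(type(clf).__module__)  # expected to point at the sklearnex implementation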
sklearnex/ensemble/_forest.py CHANGED
@@ -42,11 +42,10 @@ from sklearn.utils.validation import (
      check_X_y,
  )

+ from daal4py.sklearn._n_jobs_support import control_n_jobs
  from daal4py.sklearn._utils import (
      check_tree_nodes,
-     control_n_jobs,
      daal_check_version,
-     run_with_n_jobs,
      sklearn_check_version,
  )
  from onedal.ensemble import ExtraTreesClassifier as onedal_ExtraTreesClassifier
@@ -78,7 +77,6 @@ if sklearn_check_version("1.4"):
  class BaseForest(ABC):
      _onedal_factory = None

-     @run_with_n_jobs
      def _onedal_fit(self, X, y, sample_weight=None, queue=None):
          if sklearn_check_version("0.24"):
              X, y = self._validate_data(
@@ -787,7 +785,6 @@ class ForestClassifier(sklearn_ForestClassifier, BaseForest):

          return patching_status

-     @run_with_n_jobs
      def _onedal_predict(self, X, queue=None):
          X = check_array(
              X,
@@ -802,7 +799,6 @@ class ForestClassifier(sklearn_ForestClassifier, BaseForest):
          res = self._onedal_estimator.predict(X, queue=queue)
          return np.take(self.classes_, res.ravel().astype(np.int64, casting="unsafe"))

-     @run_with_n_jobs
      def _onedal_predict_proba(self, X, queue=None):
          X = check_array(X, dtype=[np.float64, np.float32], force_all_finite=False)
          check_is_fitted(self, "_onedal_estimator")
@@ -1096,7 +1092,6 @@ class ForestRegressor(sklearn_ForestRegressor, BaseForest):

          return patching_status

-     @run_with_n_jobs
      def _onedal_predict(self, X, queue=None):
          X = check_array(
              X, dtype=[np.float64, np.float32], force_all_finite=False
@@ -1138,7 +1133,7 @@ class ForestRegressor(sklearn_ForestRegressor, BaseForest):
      predict.__doc__ = sklearn_ForestRegressor.predict.__doc__


- @control_n_jobs
+ @control_n_jobs(decorated_methods=["fit", "predict", "predict_proba"])
  class RandomForestClassifier(ForestClassifier):
      __doc__ = sklearn_RandomForestClassifier.__doc__
      _onedal_factory = onedal_RandomForestClassifier
@@ -1348,7 +1343,7 @@ class RandomForestClassifier(ForestClassifier):
          self.min_bin_size = min_bin_size


- @control_n_jobs
+ @control_n_jobs(decorated_methods=["fit", "predict"])
  class RandomForestRegressor(ForestRegressor):
      __doc__ = sklearn_RandomForestRegressor.__doc__
      _onedal_factory = onedal_RandomForestRegressor
@@ -1549,7 +1544,7 @@ class RandomForestRegressor(ForestRegressor):
          self.min_bin_size = min_bin_size


- @control_n_jobs
+ @control_n_jobs(decorated_methods=["fit", "predict", "predict_proba"])
  class ExtraTreesClassifier(ForestClassifier):
      __doc__ = sklearn_ExtraTreesClassifier.__doc__
      _onedal_factory = onedal_ExtraTreesClassifier
@@ -1759,7 +1754,7 @@ class ExtraTreesClassifier(ForestClassifier):
          self.min_bin_size = min_bin_size


- @control_n_jobs
+ @control_n_jobs(decorated_methods=["fit", "predict"])
  class ExtraTreesRegressor(ForestRegressor):
      __doc__ = sklearn_ExtraTreesRegressor.__doc__
      _onedal_factory = onedal_ExtraTreesRegressor
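
The run_with_n_jobs removals above, like the ones in the DBSCAN hunks earlier, go together with the class-level @control_n_jobs(decorated_methods=[...]) decorator imported from daal4py.sklearn._n_jobs_support. The change is internal to the estimators; user code keeps passing n_jobs as before. A hedged sketch (the sample data and expected labels are illustrative, not taken from this diff):

import numpy as np
from sklearnex.cluster import DBSCAN  # decorated with @control_n_jobs(decorated_methods=["fit"]) in 2024.2

X = np.array([[1.0, 2.0], [2.0, 2.0], [2.0, 3.0],
              [8.0, 7.0], [8.0, 8.0], [25.0, 80.0]])
# n_jobs is consumed by the decorated fit() call to bound the thread count
labels = DBSCAN(eps=3, min_samples=2, n_jobs=2).fit(X).labels_
print(labels)  # e.g. [ 0  0  0  1  1 -1]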
sklearnex/linear_model/__init__.py CHANGED
@@ -16,14 +16,13 @@

  from .coordinate_descent import ElasticNet, Lasso
  from .linear import LinearRegression
- from .logistic_path import LogisticRegression, logistic_regression_path
+ from .logistic_regression import LogisticRegression
  from .ridge import Ridge

  __all__ = [
      "Ridge",
      "LinearRegression",
      "LogisticRegression",
-     "logistic_regression_path",
      "ElasticNet",
      "Lasso",
  ]
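
For code that imports the estimator directly rather than patching scikit-learn, the stable path is now the top-level linear_model subpackage, and logistic_regression_path is no longer re-exported. A short sketch (the iris data is illustrative):

from sklearn.datasets import load_iris
from sklearnex.linear_model import LogisticRegression  # previously sklearnex.preview.linear_model

X, y = load_iris(return_X_y=True)
print(LogisticRegression(max_iter=500).fit(X, y).score(X, y))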