scikit-learn-intelex 2025.1.0__py310-none-manylinux_2_28_x86_64.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.

Potentially problematic release.


This version of scikit-learn-intelex might be problematic.

Files changed (280)
  1. daal4py/__init__.py +73 -0
  2. daal4py/__main__.py +58 -0
  3. daal4py/_daal4py.cpython-310-x86_64-linux-gnu.so +0 -0
  4. daal4py/doc/third-party-programs.txt +424 -0
  5. daal4py/mb/__init__.py +19 -0
  6. daal4py/mb/model_builders.py +377 -0
  7. daal4py/mpi_transceiver.cpython-310-x86_64-linux-gnu.so +0 -0
  8. daal4py/sklearn/__init__.py +40 -0
  9. daal4py/sklearn/_n_jobs_support.py +248 -0
  10. daal4py/sklearn/_utils.py +245 -0
  11. daal4py/sklearn/cluster/__init__.py +20 -0
  12. daal4py/sklearn/cluster/dbscan.py +165 -0
  13. daal4py/sklearn/cluster/k_means.py +597 -0
  14. daal4py/sklearn/cluster/tests/test_dbscan.py +109 -0
  15. daal4py/sklearn/decomposition/__init__.py +19 -0
  16. daal4py/sklearn/decomposition/_pca.py +524 -0
  17. daal4py/sklearn/ensemble/AdaBoostClassifier.py +196 -0
  18. daal4py/sklearn/ensemble/GBTDAAL.py +337 -0
  19. daal4py/sklearn/ensemble/__init__.py +27 -0
  20. daal4py/sklearn/ensemble/_forest.py +1397 -0
  21. daal4py/sklearn/ensemble/tests/test_decision_forest.py +206 -0
  22. daal4py/sklearn/linear_model/__init__.py +29 -0
  23. daal4py/sklearn/linear_model/_coordinate_descent.py +848 -0
  24. daal4py/sklearn/linear_model/_linear.py +272 -0
  25. daal4py/sklearn/linear_model/_ridge.py +325 -0
  26. daal4py/sklearn/linear_model/coordinate_descent.py +17 -0
  27. daal4py/sklearn/linear_model/linear.py +17 -0
  28. daal4py/sklearn/linear_model/logistic_loss.py +195 -0
  29. daal4py/sklearn/linear_model/logistic_path.py +1026 -0
  30. daal4py/sklearn/linear_model/ridge.py +17 -0
  31. daal4py/sklearn/linear_model/tests/test_linear.py +208 -0
  32. daal4py/sklearn/linear_model/tests/test_ridge.py +69 -0
  33. daal4py/sklearn/manifold/__init__.py +19 -0
  34. daal4py/sklearn/manifold/_t_sne.py +405 -0
  35. daal4py/sklearn/metrics/__init__.py +20 -0
  36. daal4py/sklearn/metrics/_pairwise.py +236 -0
  37. daal4py/sklearn/metrics/_ranking.py +210 -0
  38. daal4py/sklearn/model_selection/__init__.py +19 -0
  39. daal4py/sklearn/model_selection/_split.py +309 -0
  40. daal4py/sklearn/model_selection/tests/test_split.py +56 -0
  41. daal4py/sklearn/monkeypatch/__init__.py +0 -0
  42. daal4py/sklearn/monkeypatch/dispatcher.py +232 -0
  43. daal4py/sklearn/monkeypatch/tests/_models_info.py +161 -0
  44. daal4py/sklearn/monkeypatch/tests/test_monkeypatch.py +71 -0
  45. daal4py/sklearn/monkeypatch/tests/test_patching.py +90 -0
  46. daal4py/sklearn/monkeypatch/tests/utils/_launch_algorithms.py +117 -0
  47. daal4py/sklearn/neighbors/__init__.py +21 -0
  48. daal4py/sklearn/neighbors/_base.py +503 -0
  49. daal4py/sklearn/neighbors/_classification.py +139 -0
  50. daal4py/sklearn/neighbors/_regression.py +74 -0
  51. daal4py/sklearn/neighbors/_unsupervised.py +55 -0
  52. daal4py/sklearn/neighbors/tests/test_kneighbors.py +113 -0
  53. daal4py/sklearn/svm/__init__.py +19 -0
  54. daal4py/sklearn/svm/svm.py +734 -0
  55. daal4py/sklearn/utils/__init__.py +21 -0
  56. daal4py/sklearn/utils/base.py +75 -0
  57. daal4py/sklearn/utils/tests/test_utils.py +51 -0
  58. daal4py/sklearn/utils/validation.py +693 -0
  59. onedal/__init__.py +83 -0
  60. onedal/_config.py +54 -0
  61. onedal/_device_offload.py +222 -0
  62. onedal/_onedal_py_dpc.cpython-310-x86_64-linux-gnu.so +0 -0
  63. onedal/_onedal_py_host.cpython-310-x86_64-linux-gnu.so +0 -0
  64. onedal/_onedal_py_spmd_dpc.cpython-310-x86_64-linux-gnu.so +0 -0
  65. onedal/basic_statistics/__init__.py +20 -0
  66. onedal/basic_statistics/basic_statistics.py +107 -0
  67. onedal/basic_statistics/incremental_basic_statistics.py +160 -0
  68. onedal/basic_statistics/tests/test_basic_statistics.py +298 -0
  69. onedal/basic_statistics/tests/test_incremental_basic_statistics.py +196 -0
  70. onedal/cluster/__init__.py +27 -0
  71. onedal/cluster/dbscan.py +110 -0
  72. onedal/cluster/kmeans.py +564 -0
  73. onedal/cluster/kmeans_init.py +115 -0
  74. onedal/cluster/tests/test_dbscan.py +125 -0
  75. onedal/cluster/tests/test_kmeans.py +88 -0
  76. onedal/cluster/tests/test_kmeans_init.py +93 -0
  77. onedal/common/_base.py +38 -0
  78. onedal/common/_estimator_checks.py +47 -0
  79. onedal/common/_mixin.py +62 -0
  80. onedal/common/_policy.py +59 -0
  81. onedal/common/_spmd_policy.py +30 -0
  82. onedal/common/hyperparameters.py +125 -0
  83. onedal/common/tests/test_policy.py +76 -0
  84. onedal/covariance/__init__.py +20 -0
  85. onedal/covariance/covariance.py +125 -0
  86. onedal/covariance/incremental_covariance.py +146 -0
  87. onedal/covariance/tests/test_covariance.py +50 -0
  88. onedal/covariance/tests/test_incremental_covariance.py +122 -0
  89. onedal/datatypes/__init__.py +19 -0
  90. onedal/datatypes/_data_conversion.py +154 -0
  91. onedal/datatypes/tests/common.py +126 -0
  92. onedal/datatypes/tests/test_data.py +414 -0
  93. onedal/decomposition/__init__.py +20 -0
  94. onedal/decomposition/incremental_pca.py +204 -0
  95. onedal/decomposition/pca.py +186 -0
  96. onedal/decomposition/tests/test_incremental_pca.py +198 -0
  97. onedal/ensemble/__init__.py +29 -0
  98. onedal/ensemble/forest.py +727 -0
  99. onedal/ensemble/tests/test_random_forest.py +97 -0
  100. onedal/linear_model/__init__.py +27 -0
  101. onedal/linear_model/incremental_linear_model.py +258 -0
  102. onedal/linear_model/linear_model.py +329 -0
  103. onedal/linear_model/logistic_regression.py +249 -0
  104. onedal/linear_model/tests/test_incremental_linear_regression.py +168 -0
  105. onedal/linear_model/tests/test_incremental_ridge_regression.py +107 -0
  106. onedal/linear_model/tests/test_linear_regression.py +250 -0
  107. onedal/linear_model/tests/test_logistic_regression.py +95 -0
  108. onedal/linear_model/tests/test_ridge.py +95 -0
  109. onedal/neighbors/__init__.py +19 -0
  110. onedal/neighbors/neighbors.py +767 -0
  111. onedal/neighbors/tests/test_knn_classification.py +49 -0
  112. onedal/primitives/__init__.py +27 -0
  113. onedal/primitives/get_tree.py +25 -0
  114. onedal/primitives/kernel_functions.py +153 -0
  115. onedal/primitives/tests/test_kernel_functions.py +159 -0
  116. onedal/spmd/__init__.py +25 -0
  117. onedal/spmd/_base.py +30 -0
  118. onedal/spmd/basic_statistics/__init__.py +20 -0
  119. onedal/spmd/basic_statistics/basic_statistics.py +30 -0
  120. onedal/spmd/basic_statistics/incremental_basic_statistics.py +69 -0
  121. onedal/spmd/cluster/__init__.py +28 -0
  122. onedal/spmd/cluster/dbscan.py +23 -0
  123. onedal/spmd/cluster/kmeans.py +56 -0
  124. onedal/spmd/covariance/__init__.py +20 -0
  125. onedal/spmd/covariance/covariance.py +26 -0
  126. onedal/spmd/covariance/incremental_covariance.py +82 -0
  127. onedal/spmd/decomposition/__init__.py +20 -0
  128. onedal/spmd/decomposition/incremental_pca.py +117 -0
  129. onedal/spmd/decomposition/pca.py +26 -0
  130. onedal/spmd/ensemble/__init__.py +19 -0
  131. onedal/spmd/ensemble/forest.py +28 -0
  132. onedal/spmd/linear_model/__init__.py +21 -0
  133. onedal/spmd/linear_model/incremental_linear_model.py +97 -0
  134. onedal/spmd/linear_model/linear_model.py +30 -0
  135. onedal/spmd/linear_model/logistic_regression.py +38 -0
  136. onedal/spmd/neighbors/__init__.py +19 -0
  137. onedal/spmd/neighbors/neighbors.py +75 -0
  138. onedal/svm/__init__.py +19 -0
  139. onedal/svm/svm.py +556 -0
  140. onedal/svm/tests/test_csr_svm.py +351 -0
  141. onedal/svm/tests/test_nusvc.py +204 -0
  142. onedal/svm/tests/test_nusvr.py +210 -0
  143. onedal/svm/tests/test_svc.py +176 -0
  144. onedal/svm/tests/test_svr.py +243 -0
  145. onedal/tests/test_common.py +57 -0
  146. onedal/tests/utils/_dataframes_support.py +162 -0
  147. onedal/tests/utils/_device_selection.py +102 -0
  148. onedal/utils/__init__.py +49 -0
  149. onedal/utils/_array_api.py +81 -0
  150. onedal/utils/_dpep_helpers.py +56 -0
  151. onedal/utils/validation.py +440 -0
  152. scikit_learn_intelex-2025.1.0.dist-info/LICENSE.txt +202 -0
  153. scikit_learn_intelex-2025.1.0.dist-info/METADATA +231 -0
  154. scikit_learn_intelex-2025.1.0.dist-info/RECORD +280 -0
  155. scikit_learn_intelex-2025.1.0.dist-info/WHEEL +5 -0
  156. scikit_learn_intelex-2025.1.0.dist-info/top_level.txt +3 -0
  157. sklearnex/__init__.py +66 -0
  158. sklearnex/__main__.py +58 -0
  159. sklearnex/_config.py +116 -0
  160. sklearnex/_device_offload.py +126 -0
  161. sklearnex/_utils.py +132 -0
  162. sklearnex/basic_statistics/__init__.py +20 -0
  163. sklearnex/basic_statistics/basic_statistics.py +230 -0
  164. sklearnex/basic_statistics/incremental_basic_statistics.py +345 -0
  165. sklearnex/basic_statistics/tests/test_basic_statistics.py +270 -0
  166. sklearnex/basic_statistics/tests/test_incremental_basic_statistics.py +404 -0
  167. sklearnex/cluster/__init__.py +20 -0
  168. sklearnex/cluster/dbscan.py +197 -0
  169. sklearnex/cluster/k_means.py +395 -0
  170. sklearnex/cluster/tests/test_dbscan.py +38 -0
  171. sklearnex/cluster/tests/test_kmeans.py +159 -0
  172. sklearnex/conftest.py +82 -0
  173. sklearnex/covariance/__init__.py +19 -0
  174. sklearnex/covariance/incremental_covariance.py +398 -0
  175. sklearnex/covariance/tests/test_incremental_covariance.py +237 -0
  176. sklearnex/decomposition/__init__.py +19 -0
  177. sklearnex/decomposition/pca.py +425 -0
  178. sklearnex/decomposition/tests/test_pca.py +58 -0
  179. sklearnex/dispatcher.py +543 -0
  180. sklearnex/doc/third-party-programs.txt +424 -0
  181. sklearnex/ensemble/__init__.py +29 -0
  182. sklearnex/ensemble/_forest.py +2029 -0
  183. sklearnex/ensemble/tests/test_forest.py +135 -0
  184. sklearnex/glob/__main__.py +72 -0
  185. sklearnex/glob/dispatcher.py +101 -0
  186. sklearnex/linear_model/__init__.py +32 -0
  187. sklearnex/linear_model/coordinate_descent.py +30 -0
  188. sklearnex/linear_model/incremental_linear.py +482 -0
  189. sklearnex/linear_model/incremental_ridge.py +425 -0
  190. sklearnex/linear_model/linear.py +341 -0
  191. sklearnex/linear_model/logistic_regression.py +413 -0
  192. sklearnex/linear_model/ridge.py +24 -0
  193. sklearnex/linear_model/tests/test_incremental_linear.py +207 -0
  194. sklearnex/linear_model/tests/test_incremental_ridge.py +153 -0
  195. sklearnex/linear_model/tests/test_linear.py +167 -0
  196. sklearnex/linear_model/tests/test_logreg.py +134 -0
  197. sklearnex/manifold/__init__.py +19 -0
  198. sklearnex/manifold/t_sne.py +21 -0
  199. sklearnex/manifold/tests/test_tsne.py +26 -0
  200. sklearnex/metrics/__init__.py +23 -0
  201. sklearnex/metrics/pairwise.py +22 -0
  202. sklearnex/metrics/ranking.py +20 -0
  203. sklearnex/metrics/tests/test_metrics.py +39 -0
  204. sklearnex/model_selection/__init__.py +21 -0
  205. sklearnex/model_selection/split.py +22 -0
  206. sklearnex/model_selection/tests/test_model_selection.py +34 -0
  207. sklearnex/neighbors/__init__.py +27 -0
  208. sklearnex/neighbors/_lof.py +236 -0
  209. sklearnex/neighbors/common.py +310 -0
  210. sklearnex/neighbors/knn_classification.py +231 -0
  211. sklearnex/neighbors/knn_regression.py +207 -0
  212. sklearnex/neighbors/knn_unsupervised.py +178 -0
  213. sklearnex/neighbors/tests/test_neighbors.py +82 -0
  214. sklearnex/preview/__init__.py +17 -0
  215. sklearnex/preview/covariance/__init__.py +19 -0
  216. sklearnex/preview/covariance/covariance.py +138 -0
  217. sklearnex/preview/covariance/tests/test_covariance.py +66 -0
  218. sklearnex/preview/decomposition/__init__.py +19 -0
  219. sklearnex/preview/decomposition/incremental_pca.py +233 -0
  220. sklearnex/preview/decomposition/tests/test_incremental_pca.py +266 -0
  221. sklearnex/preview/linear_model/__init__.py +19 -0
  222. sklearnex/preview/linear_model/ridge.py +424 -0
  223. sklearnex/preview/linear_model/tests/test_ridge.py +102 -0
  224. sklearnex/spmd/__init__.py +25 -0
  225. sklearnex/spmd/basic_statistics/__init__.py +20 -0
  226. sklearnex/spmd/basic_statistics/basic_statistics.py +21 -0
  227. sklearnex/spmd/basic_statistics/incremental_basic_statistics.py +30 -0
  228. sklearnex/spmd/basic_statistics/tests/test_basic_statistics_spmd.py +107 -0
  229. sklearnex/spmd/basic_statistics/tests/test_incremental_basic_statistics_spmd.py +307 -0
  230. sklearnex/spmd/cluster/__init__.py +30 -0
  231. sklearnex/spmd/cluster/dbscan.py +50 -0
  232. sklearnex/spmd/cluster/kmeans.py +21 -0
  233. sklearnex/spmd/cluster/tests/test_dbscan_spmd.py +97 -0
  234. sklearnex/spmd/cluster/tests/test_kmeans_spmd.py +172 -0
  235. sklearnex/spmd/covariance/__init__.py +20 -0
  236. sklearnex/spmd/covariance/covariance.py +21 -0
  237. sklearnex/spmd/covariance/incremental_covariance.py +37 -0
  238. sklearnex/spmd/covariance/tests/test_covariance_spmd.py +107 -0
  239. sklearnex/spmd/covariance/tests/test_incremental_covariance_spmd.py +184 -0
  240. sklearnex/spmd/decomposition/__init__.py +20 -0
  241. sklearnex/spmd/decomposition/incremental_pca.py +30 -0
  242. sklearnex/spmd/decomposition/pca.py +21 -0
  243. sklearnex/spmd/decomposition/tests/test_incremental_pca_spmd.py +269 -0
  244. sklearnex/spmd/decomposition/tests/test_pca_spmd.py +128 -0
  245. sklearnex/spmd/ensemble/__init__.py +19 -0
  246. sklearnex/spmd/ensemble/forest.py +71 -0
  247. sklearnex/spmd/ensemble/tests/test_forest_spmd.py +265 -0
  248. sklearnex/spmd/linear_model/__init__.py +21 -0
  249. sklearnex/spmd/linear_model/incremental_linear_model.py +35 -0
  250. sklearnex/spmd/linear_model/linear_model.py +21 -0
  251. sklearnex/spmd/linear_model/logistic_regression.py +21 -0
  252. sklearnex/spmd/linear_model/tests/test_incremental_linear_spmd.py +329 -0
  253. sklearnex/spmd/linear_model/tests/test_linear_regression_spmd.py +145 -0
  254. sklearnex/spmd/linear_model/tests/test_logistic_regression_spmd.py +162 -0
  255. sklearnex/spmd/neighbors/__init__.py +19 -0
  256. sklearnex/spmd/neighbors/neighbors.py +25 -0
  257. sklearnex/spmd/neighbors/tests/test_neighbors_spmd.py +288 -0
  258. sklearnex/svm/__init__.py +29 -0
  259. sklearnex/svm/_common.py +339 -0
  260. sklearnex/svm/nusvc.py +371 -0
  261. sklearnex/svm/nusvr.py +170 -0
  262. sklearnex/svm/svc.py +399 -0
  263. sklearnex/svm/svr.py +167 -0
  264. sklearnex/svm/tests/test_svm.py +93 -0
  265. sklearnex/tests/test_common.py +390 -0
  266. sklearnex/tests/test_config.py +123 -0
  267. sklearnex/tests/test_memory_usage.py +379 -0
  268. sklearnex/tests/test_monkeypatch.py +276 -0
  269. sklearnex/tests/test_n_jobs_support.py +108 -0
  270. sklearnex/tests/test_parallel.py +48 -0
  271. sklearnex/tests/test_patching.py +385 -0
  272. sklearnex/tests/test_run_to_run_stability.py +321 -0
  273. sklearnex/tests/utils/__init__.py +44 -0
  274. sklearnex/tests/utils/base.py +371 -0
  275. sklearnex/tests/utils/spmd.py +198 -0
  276. sklearnex/utils/__init__.py +19 -0
  277. sklearnex/utils/_array_api.py +82 -0
  278. sklearnex/utils/parallel.py +59 -0
  279. sklearnex/utils/tests/test_finite.py +89 -0
  280. sklearnex/utils/validation.py +17 -0
onedal/basic_statistics/tests/test_basic_statistics.py
@@ -0,0 +1,298 @@
+ # ==============================================================================
+ # Copyright 2023 Intel Corporation
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+
+ import numpy as np
+ import pytest
+ from numpy.testing import assert_allclose
+ from scipy import sparse as sp
+
+ from daal4py.sklearn._utils import daal_check_version
+ from onedal.basic_statistics import BasicStatistics
+ from onedal.tests.utils._device_selection import get_queues
+
+
+ def expected_sum(X):
+     return np.sum(X, axis=0)
+
+
+ def expected_max(X):
+     return np.max(X, axis=0)
+
+
+ def expected_min(X):
+     return np.min(X, axis=0)
+
+
+ def expected_mean(X):
+     return np.mean(X, axis=0)
+
+
+ def expected_standard_deviation(X):
+     return np.std(X, axis=0)
+
+
+ def expected_variance(X):
+     return np.var(X, axis=0)
+
+
+ def expected_variation(X):
+     return expected_standard_deviation(X) / expected_mean(X)
+
+
+ def expected_sum_squares(X):
+     return np.sum(np.square(X), axis=0)
+
+
+ def expected_sum_squares_centered(X):
+     return np.sum(np.square(X - expected_mean(X)), axis=0)
+
+
+ def expected_standard_deviation(X):
+     return np.sqrt(expected_variance(X))
+
+
+ def expected_second_order_raw_moment(X):
+     return np.mean(np.square(X), axis=0)
+
+
+ options_and_tests = [
+     ("sum", expected_sum, (5e-4, 1e-7)),
+     ("min", expected_min, (1e-7, 1e-7)),
+     ("max", expected_max, (1e-7, 1e-7)),
+     ("mean", expected_mean, (5e-7, 1e-7)),
+     ("variance", expected_variance, (2e-3, 2e-3)),
+     ("variation", expected_variation, (5e-2, 5e-2)),
+     ("sum_squares", expected_sum_squares, (2e-4, 1e-7)),
+     ("sum_squares_centered", expected_sum_squares_centered, (2e-4, 1e-7)),
+     ("standard_deviation", expected_standard_deviation, (2e-3, 2e-3)),
+     ("second_order_raw_moment", expected_second_order_raw_moment, (1e-6, 1e-7)),
+ ]
+
+ options_and_tests_csr = [
+     ("sum", "sum", (5e-6, 1e-9)),
+     ("min", "min", (0, 0)),
+     ("max", "max", (0, 0)),
+     ("mean", "mean", (5e-6, 1e-9)),
+ ]
+
+
+ @pytest.mark.parametrize("queue", get_queues())
+ @pytest.mark.parametrize("option", options_and_tests)
+ @pytest.mark.parametrize("row_count", [100, 1000])
+ @pytest.mark.parametrize("column_count", [10, 100])
+ @pytest.mark.parametrize("weighted", [True, False])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_single_option_on_random_data(
+     queue, option, row_count, column_count, weighted, dtype
+ ):
+     result_option, function, tols = option
+     fp32tol, fp64tol = tols
+     seed = 77
+     gen = np.random.default_rng(seed)
+     data = gen.uniform(low=-0.3, high=+0.7, size=(row_count, column_count))
+     data = data.astype(dtype=dtype)
+     if weighted:
+         weights = gen.uniform(low=-0.5, high=+1.0, size=row_count)
+         weights = weights.astype(dtype=dtype)
+     else:
+         weights = None
+
+     basicstat = BasicStatistics(result_options=result_option)
+
+     result = basicstat.fit(data, sample_weight=weights, queue=queue)
+
+     res = getattr(result, result_option)
+     if weighted:
+         weighted_data = np.diag(weights) @ data
+         gtr = function(weighted_data)
+     else:
+         gtr = function(data)
+
+     tol = fp32tol if res.dtype == np.float32 else fp64tol
+     assert_allclose(gtr, res, atol=tol)
+
+
+ @pytest.mark.parametrize("queue", get_queues())
+ @pytest.mark.parametrize("row_count", [100, 1000])
+ @pytest.mark.parametrize("column_count", [10, 100])
+ @pytest.mark.parametrize("weighted", [True, False])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_multiple_options_on_random_data(queue, row_count, column_count, weighted, dtype):
+     seed = 42
+     gen = np.random.default_rng(seed)
+     data = gen.uniform(low=-0.3, high=+0.7, size=(row_count, column_count))
+     data = data.astype(dtype=dtype)
+
+     if weighted:
+         weights = gen.uniform(low=-0.5, high=+1.0, size=row_count)
+         weights = weights.astype(dtype=dtype)
+     else:
+         weights = None
+
+     basicstat = BasicStatistics(result_options=["mean", "max", "sum"])
+
+     result = basicstat.fit(data, sample_weight=weights, queue=queue)
+
+     res_mean, res_max, res_sum = result.mean, result.max, result.sum
+     if weighted:
+         weighted_data = np.diag(weights) @ data
+         gtr_mean, gtr_max, gtr_sum = (
+             expected_mean(weighted_data),
+             expected_max(weighted_data),
+             expected_sum(weighted_data),
+         )
+     else:
+         gtr_mean, gtr_max, gtr_sum = (
+             expected_mean(data),
+             expected_max(data),
+             expected_sum(data),
+         )
+
+     tol = 5e-4 if res_mean.dtype == np.float32 else 1e-7
+     assert_allclose(gtr_mean, res_mean, atol=tol)
+     assert_allclose(gtr_max, res_max, atol=tol)
+     assert_allclose(gtr_sum, res_sum, atol=tol)
+
+
+ @pytest.mark.parametrize("queue", get_queues())
+ @pytest.mark.parametrize("row_count", [100, 1000])
+ @pytest.mark.parametrize("column_count", [10, 100])
+ @pytest.mark.parametrize("weighted", [True, False])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_all_option_on_random_data(queue, row_count, column_count, weighted, dtype):
+     seed = 77
+     gen = np.random.default_rng(seed)
+     data = gen.uniform(low=-0.3, high=+0.7, size=(row_count, column_count))
+     data = data.astype(dtype=dtype)
+     if weighted:
+         weights = gen.uniform(low=-0.5, high=+1.0, size=row_count)
+         weights = weights.astype(dtype=dtype)
+     else:
+         weights = None
+
+     basicstat = BasicStatistics(result_options="all")
+
+     result = basicstat.fit(data, sample_weight=weights, queue=queue)
+
+     if weighted:
+         weighted_data = np.diag(weights) @ data
+
+     for option in options_and_tests:
+         result_option, function, tols = option
+         fp32tol, fp64tol = tols
+         res = getattr(result, result_option)
+         if weighted:
+             gtr = function(weighted_data)
+         else:
+             gtr = function(data)
+         tol = fp32tol if res.dtype == np.float32 else fp64tol
+         assert_allclose(gtr, res, atol=tol)
+
+
+ @pytest.mark.parametrize("queue", get_queues())
+ @pytest.mark.parametrize("option", options_and_tests)
+ @pytest.mark.parametrize("data_size", [100, 1000])
+ @pytest.mark.parametrize("weighted", [True, False])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_1d_input_on_random_data(queue, option, data_size, weighted, dtype):
+     result_option, function, tols = option
+     fp32tol, fp64tol = tols
+     seed = 77
+     gen = np.random.default_rng(seed)
+     data = gen.uniform(low=-0.3, high=+0.7, size=data_size)
+     data = data.astype(dtype=dtype)
+     if weighted:
+         weights = gen.uniform(low=-0.5, high=+1.0, size=data_size)
+         weights = weights.astype(dtype=dtype)
+     else:
+         weights = None
+
+     basicstat = BasicStatistics(result_options=result_option)
+
+     result = basicstat.fit(data, sample_weight=weights, queue=queue)
+
+     res = getattr(result, result_option)
+     if weighted:
+         weighted_data = weights * data
+         gtr = function(weighted_data)
+     else:
+         gtr = function(data)
+
+     tol = fp32tol if res.dtype == np.float32 else fp64tol
+     assert_allclose(gtr, res, atol=tol)
+
+
+ @pytest.mark.skipif(not hasattr(sp, "random_array"), reason="requires scipy>=1.12.0")
+ @pytest.mark.parametrize("queue", get_queues())
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_basic_csr(queue, dtype):
+     seed = 42
+     row_count, column_count = 5000, 3008
+
+     gen = np.random.default_rng(seed)
+
+     data = sp.random_array(
+         shape=(row_count, column_count),
+         density=0.01,
+         format="csr",
+         dtype=dtype,
+         random_state=gen,
+     )
+
+     basicstat = BasicStatistics(result_options="mean")
+     result = basicstat.fit(data, queue=queue)
+
+     res_mean = result.mean
+     gtr_mean = data.mean(axis=0)
+     tol = 5e-6 if res_mean.dtype == np.float32 else 1e-9
+     assert_allclose(gtr_mean, res_mean, rtol=tol)
+
+
+ @pytest.mark.skipif(not hasattr(sp, "random_array"), reason="requires scipy>=1.12.0")
+ @pytest.mark.parametrize("queue", get_queues())
+ @pytest.mark.parametrize("option", options_and_tests_csr)
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_options_csr(queue, option, dtype):
+     result_option, function, tols = option
+     fp32tol, fp64tol = tols
+
+     if result_option == "max":
+         pytest.skip("There is a bug in oneDAL's max computations on GPU")
+
+     seed = 42
+     row_count, column_count = 20046, 4007
+
+     gen = np.random.default_rng(seed)
+
+     data = sp.random_array(
+         shape=(row_count, column_count),
+         density=0.002,
+         format="csr",
+         dtype=dtype,
+         random_state=gen,
+     )
+
+     basicstat = BasicStatistics(result_options=result_option)
+     result = basicstat.fit(data, queue=queue)
+
+     res = getattr(result, result_option)
+     func = getattr(data, function)
+     gtr = func(axis=0)
+     if type(gtr).__name__ != "ndarray":
+         gtr = gtr.toarray().flatten()
+     tol = fp32tol if res.dtype == np.float32 else fp64tol
+
+     assert_allclose(gtr, res, rtol=tol)
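For orientation, the tests above drive the onedal BasicStatistics estimator through a small surface: a constructor taking result_options, a fit call accepting an optional sample_weight and queue, and result attributes named after the requested options. The sketch below restates that pattern outside of pytest; it is inferred only from the calls visible in this diff (queue=None is assumed to select host execution), not from package documentation.

import numpy as np

from onedal.basic_statistics import BasicStatistics

# Random data comparable to what the tests above generate.
X = np.random.default_rng(0).uniform(low=-0.3, high=0.7, size=(100, 10))

# Request a subset of statistics; the tests also pass a single option or "all".
stats = BasicStatistics(result_options=["mean", "max", "sum"])
result = stats.fit(X, sample_weight=None, queue=None)

# Each requested option becomes an attribute on the fitted result.
print(result.mean, result.max, result.sum)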
onedal/basic_statistics/tests/test_incremental_basic_statistics.py
@@ -0,0 +1,196 @@
+ # ==============================================================================
+ # Copyright 2024 Intel Corporation
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+
+ import numpy as np
+ import pytest
+ from numpy.testing import assert_allclose
+
+ from onedal.basic_statistics import IncrementalBasicStatistics
+ from onedal.basic_statistics.tests.test_basic_statistics import (
+     expected_max,
+     expected_mean,
+     expected_sum,
+     options_and_tests,
+ )
+ from onedal.tests.utils._device_selection import get_queues
+
+
+ @pytest.mark.parametrize("queue", get_queues())
+ @pytest.mark.parametrize("weighted", [True, False])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_multiple_options_on_gold_data(queue, weighted, dtype):
+     X = np.array([[0, 0], [1, 1]])
+     X = X.astype(dtype=dtype)
+     X_split = np.array_split(X, 2)
+     if weighted:
+         weights = np.array([1, 0.5])
+         weights = weights.astype(dtype=dtype)
+         weights_split = np.array_split(weights, 2)
+
+     incbs = IncrementalBasicStatistics()
+     for i in range(2):
+         if weighted:
+             incbs.partial_fit(X_split[i], weights_split[i], queue=queue)
+         else:
+             incbs.partial_fit(X_split[i], queue=queue)
+
+     result = incbs.finalize_fit()
+
+     if weighted:
+         expected_weighted_mean = np.array([0.25, 0.25])
+         expected_weighted_min = np.array([0, 0])
+         expected_weighted_max = np.array([0.5, 0.5])
+         assert_allclose(expected_weighted_mean, result.mean)
+         assert_allclose(expected_weighted_max, result.max)
+         assert_allclose(expected_weighted_min, result.min)
+     else:
+         expected_mean = np.array([0.5, 0.5])
+         expected_min = np.array([0, 0])
+         expected_max = np.array([1, 1])
+         assert_allclose(expected_mean, result.mean)
+         assert_allclose(expected_max, result.max)
+         assert_allclose(expected_min, result.min)
+
+
+ @pytest.mark.parametrize("queue", get_queues())
+ @pytest.mark.parametrize("num_batches", [2, 10])
+ @pytest.mark.parametrize("option", options_and_tests)
+ @pytest.mark.parametrize("row_count", [100, 1000])
+ @pytest.mark.parametrize("column_count", [10, 100])
+ @pytest.mark.parametrize("weighted", [True, False])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_single_option_on_random_data(
+     queue, num_batches, option, row_count, column_count, weighted, dtype
+ ):
+     result_option, function, tols = option
+     fp32tol, fp64tol = tols
+     seed = 77
+     gen = np.random.default_rng(seed)
+     data = gen.uniform(low=-0.3, high=+0.7, size=(row_count, column_count))
+     data = data.astype(dtype=dtype)
+     data_split = np.array_split(data, num_batches)
+     if weighted:
+         weights = gen.uniform(low=-0.5, high=+1.0, size=row_count)
+         weights = weights.astype(dtype=dtype)
+         weights_split = np.array_split(weights, num_batches)
+     incbs = IncrementalBasicStatistics(result_options=result_option)
+
+     for i in range(num_batches):
+         if weighted:
+             incbs.partial_fit(data_split[i], weights_split[i], queue=queue)
+         else:
+             incbs.partial_fit(data_split[i], queue=queue)
+     result = incbs.finalize_fit()
+
+     res = getattr(result, result_option)
+     if weighted:
+         weighted_data = np.diag(weights) @ data
+         gtr = function(weighted_data)
+     else:
+         gtr = function(data)
+
+     tol = fp32tol if res.dtype == np.float32 else fp64tol
+     assert_allclose(gtr, res, atol=tol)
+
+
+ @pytest.mark.parametrize("queue", get_queues())
+ @pytest.mark.parametrize("num_batches", [2, 10])
+ @pytest.mark.parametrize("row_count", [100, 1000])
+ @pytest.mark.parametrize("column_count", [10, 100])
+ @pytest.mark.parametrize("weighted", [True, False])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_multiple_options_on_random_data(
+     queue, num_batches, row_count, column_count, weighted, dtype
+ ):
+     seed = 42
+     gen = np.random.default_rng(seed)
+     data = gen.uniform(low=-0.3, high=+0.7, size=(row_count, column_count))
+     data = data.astype(dtype=dtype)
+     data_split = np.array_split(data, num_batches)
+     if weighted:
+         weights = gen.uniform(low=-0.5, high=+1.0, size=row_count)
+         weights = weights.astype(dtype=dtype)
+         weights_split = np.array_split(weights, num_batches)
+     incbs = IncrementalBasicStatistics(result_options=["mean", "max", "sum"])
+
+     for i in range(num_batches):
+         if weighted:
+             incbs.partial_fit(data_split[i], weights_split[i], queue=queue)
+         else:
+             incbs.partial_fit(data_split[i], queue=queue)
+     result = incbs.finalize_fit()
+
+     res_mean, res_max, res_sum = result.mean, result.max, result.sum
+     if weighted:
+         weighted_data = np.diag(weights) @ data
+         gtr_mean, gtr_max, gtr_sum = (
+             expected_mean(weighted_data),
+             expected_max(weighted_data),
+             expected_sum(weighted_data),
+         )
+     else:
+         gtr_mean, gtr_max, gtr_sum = (
+             expected_mean(data),
+             expected_max(data),
+             expected_sum(data),
+         )
+
+     tol = 3e-4 if res_mean.dtype == np.float32 else 1e-7
+     assert_allclose(gtr_mean, res_mean, atol=tol)
+     assert_allclose(gtr_max, res_max, atol=tol)
+     assert_allclose(gtr_sum, res_sum, atol=tol)
+
+
+ @pytest.mark.parametrize("queue", get_queues())
+ @pytest.mark.parametrize("num_batches", [2, 10])
+ @pytest.mark.parametrize("row_count", [100, 1000])
+ @pytest.mark.parametrize("column_count", [10, 100])
+ @pytest.mark.parametrize("weighted", [True, False])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_all_option_on_random_data(
+     queue, num_batches, row_count, column_count, weighted, dtype
+ ):
+     seed = 77
+     gen = np.random.default_rng(seed)
+     data = gen.uniform(low=-0.3, high=+0.7, size=(row_count, column_count))
+     data = data.astype(dtype=dtype)
+     data_split = np.array_split(data, num_batches)
+     if weighted:
+         weights = gen.uniform(low=-0.5, high=+1.0, size=row_count)
+         weights = weights.astype(dtype=dtype)
+         weights_split = np.array_split(weights, num_batches)
+     incbs = IncrementalBasicStatistics(result_options="all")
+
+     for i in range(num_batches):
+         if weighted:
+             incbs.partial_fit(data_split[i], weights_split[i], queue=queue)
+         else:
+             incbs.partial_fit(data_split[i], queue=queue)
+     result = incbs.finalize_fit()
+
+     if weighted:
+         weighted_data = np.diag(weights) @ data
+
+     for option in options_and_tests:
+         result_option, function, tols = option
+         fp32tol, fp64tol = tols
+         res = getattr(result, result_option)
+         if weighted:
+             gtr = function(weighted_data)
+         else:
+             gtr = function(data)
+         tol = fp32tol if res.dtype == np.float32 else fp64tol
+         assert_allclose(gtr, res, atol=tol)
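The incremental variant exercised above follows a partial_fit / finalize_fit protocol: data chunks (optionally with matching weight chunks) are fed one at a time, and finalize_fit returns the object holding the requested statistics. A minimal sketch of that flow, based solely on the test code in this diff and assuming queue=None means host execution:

import numpy as np

from onedal.basic_statistics import IncrementalBasicStatistics

data = np.random.default_rng(0).uniform(size=(1000, 10))

incbs = IncrementalBasicStatistics(result_options=["mean", "sum"])
for batch in np.array_split(data, 4):
    incbs.partial_fit(batch, queue=None)  # one call per data chunk

result = incbs.finalize_fit()
print(result.mean, result.sum)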
onedal/cluster/__init__.py
@@ -0,0 +1,27 @@
+ # ==============================================================================
+ # Copyright 2023 Intel Corporation
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+
+ from daal4py.sklearn._utils import daal_check_version
+
+ from .dbscan import DBSCAN
+ from .kmeans import KMeans, k_means
+
+ __all__ = ["DBSCAN", "KMeans", "k_means"]
+
+ if daal_check_version((2023, "P", 200)):
+     from .kmeans_init import KMeansInit, kmeans_plusplus
+
+     __all__ += ["KMeansInit", "kmeans_plusplus"]
onedal/cluster/dbscan.py
@@ -0,0 +1,110 @@
+ # ===============================================================================
+ # Copyright 2023 Intel Corporation
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ===============================================================================
+
+ import numpy as np
+
+ from daal4py.sklearn._utils import get_dtype, make2d
+
+ from ..common._base import BaseEstimator
+ from ..common._mixin import ClusterMixin
+ from ..datatypes import _convert_to_supported, from_table, to_table
+ from ..utils import _check_array
+
+
+ class BaseDBSCAN(BaseEstimator, ClusterMixin):
+     def __init__(
+         self,
+         eps=0.5,
+         *,
+         min_samples=5,
+         metric="euclidean",
+         metric_params=None,
+         algorithm="auto",
+         leaf_size=30,
+         p=None,
+         n_jobs=None,
+     ):
+         self.eps = eps
+         self.min_samples = min_samples
+         self.metric = metric
+         self.metric_params = metric_params
+         self.algorithm = algorithm
+         self.leaf_size = leaf_size
+         self.p = p
+         self.n_jobs = n_jobs
+
+     def _get_onedal_params(self, dtype=np.float32):
+         return {
+             "fptype": "float" if dtype == np.float32 else "double",
+             "method": "by_default",
+             "min_observations": int(self.min_samples),
+             "epsilon": float(self.eps),
+             "mem_save_mode": False,
+             "result_options": "core_observation_indices|responses",
+         }
+
+     def _fit(self, X, y, sample_weight, module, queue):
+         policy = self._get_policy(queue, X)
+         X = _check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
+         sample_weight = make2d(sample_weight) if sample_weight is not None else None
+         X = make2d(X)
+
+         types = [np.float32, np.float64]
+         if get_dtype(X) not in types:
+             X = X.astype(np.float64)
+         X = _convert_to_supported(policy, X)
+         dtype = get_dtype(X)
+         params = self._get_onedal_params(dtype)
+         result = module.compute(policy, params, to_table(X), to_table(sample_weight))
+
+         self.labels_ = from_table(result.responses).ravel()
+         if result.core_observation_indices is not None:
+             self.core_sample_indices_ = from_table(
+                 result.core_observation_indices
+             ).ravel()
+         else:
+             self.core_sample_indices_ = np.array([], dtype=np.intc)
+         self.components_ = np.take(X, self.core_sample_indices_, axis=0)
+         self.n_features_in_ = X.shape[1]
+         return self
+
+
+ class DBSCAN(BaseDBSCAN):
+     def __init__(
+         self,
+         eps=0.5,
+         *,
+         min_samples=5,
+         metric="euclidean",
+         metric_params=None,
+         algorithm="auto",
+         leaf_size=30,
+         p=None,
+         n_jobs=None,
+     ):
+         self.eps = eps
+         self.min_samples = min_samples
+         self.metric = metric
+         self.metric_params = metric_params
+         self.algorithm = algorithm
+         self.leaf_size = leaf_size
+         self.p = p
+         self.n_jobs = n_jobs
+
+     def fit(self, X, y=None, sample_weight=None, queue=None):
+         return super()._fit(
+             X, y, sample_weight, self._get_backend("dbscan", "clustering", None), queue
+         )
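As a quick reference, the DBSCAN estimator defined above exposes a scikit-learn-like constructor but runs the computation through the oneDAL backend (eps and min_samples map to the epsilon and min_observations parameters built in _get_onedal_params). The following sketch is inferred from the fit signature and fitted attributes shown in this diff, with queue=None assumed to select host execution:

import numpy as np

from onedal.cluster import DBSCAN

X = np.array(
    [[1.0, 2.0], [2.0, 2.0], [2.0, 3.0], [8.0, 7.0], [8.0, 8.0], [25.0, 80.0]]
)

model = DBSCAN(eps=3.0, min_samples=2).fit(X, queue=None)

print(model.labels_)               # cluster assignment per input row
print(model.core_sample_indices_)  # indices of core observations
print(model.components_)           # the core observations themselves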