scikit-learn-intelex 2025.4.0__py313-none-manylinux_2_28_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of scikit-learn-intelex might be problematic; see the registry's advisory page for more details.

Files changed (282)
  1. daal4py/__init__.py +73 -0
  2. daal4py/__main__.py +58 -0
  3. daal4py/_daal4py.cpython-313-x86_64-linux-gnu.so +0 -0
  4. daal4py/doc/third-party-programs.txt +424 -0
  5. daal4py/mb/__init__.py +19 -0
  6. daal4py/mb/model_builders.py +377 -0
  7. daal4py/mpi_transceiver.cpython-313-x86_64-linux-gnu.so +0 -0
  8. daal4py/sklearn/__init__.py +40 -0
  9. daal4py/sklearn/_n_jobs_support.py +248 -0
  10. daal4py/sklearn/_utils.py +245 -0
  11. daal4py/sklearn/cluster/__init__.py +20 -0
  12. daal4py/sklearn/cluster/dbscan.py +165 -0
  13. daal4py/sklearn/cluster/k_means.py +597 -0
  14. daal4py/sklearn/cluster/tests/test_dbscan.py +109 -0
  15. daal4py/sklearn/decomposition/__init__.py +19 -0
  16. daal4py/sklearn/decomposition/_pca.py +524 -0
  17. daal4py/sklearn/ensemble/AdaBoostClassifier.py +196 -0
  18. daal4py/sklearn/ensemble/GBTDAAL.py +337 -0
  19. daal4py/sklearn/ensemble/__init__.py +27 -0
  20. daal4py/sklearn/ensemble/_forest.py +1397 -0
  21. daal4py/sklearn/ensemble/tests/test_decision_forest.py +206 -0
  22. daal4py/sklearn/linear_model/__init__.py +29 -0
  23. daal4py/sklearn/linear_model/_coordinate_descent.py +848 -0
  24. daal4py/sklearn/linear_model/_linear.py +272 -0
  25. daal4py/sklearn/linear_model/_ridge.py +325 -0
  26. daal4py/sklearn/linear_model/coordinate_descent.py +17 -0
  27. daal4py/sklearn/linear_model/linear.py +17 -0
  28. daal4py/sklearn/linear_model/logistic_loss.py +195 -0
  29. daal4py/sklearn/linear_model/logistic_path.py +1026 -0
  30. daal4py/sklearn/linear_model/ridge.py +17 -0
  31. daal4py/sklearn/linear_model/tests/test_linear.py +208 -0
  32. daal4py/sklearn/linear_model/tests/test_ridge.py +69 -0
  33. daal4py/sklearn/manifold/__init__.py +19 -0
  34. daal4py/sklearn/manifold/_t_sne.py +405 -0
  35. daal4py/sklearn/metrics/__init__.py +20 -0
  36. daal4py/sklearn/metrics/_pairwise.py +236 -0
  37. daal4py/sklearn/metrics/_ranking.py +210 -0
  38. daal4py/sklearn/model_selection/__init__.py +19 -0
  39. daal4py/sklearn/model_selection/_split.py +309 -0
  40. daal4py/sklearn/model_selection/tests/test_split.py +56 -0
  41. daal4py/sklearn/monkeypatch/__init__.py +0 -0
  42. daal4py/sklearn/monkeypatch/dispatcher.py +232 -0
  43. daal4py/sklearn/monkeypatch/tests/_models_info.py +161 -0
  44. daal4py/sklearn/monkeypatch/tests/test_monkeypatch.py +71 -0
  45. daal4py/sklearn/monkeypatch/tests/test_patching.py +90 -0
  46. daal4py/sklearn/monkeypatch/tests/utils/_launch_algorithms.py +117 -0
  47. daal4py/sklearn/neighbors/__init__.py +21 -0
  48. daal4py/sklearn/neighbors/_base.py +503 -0
  49. daal4py/sklearn/neighbors/_classification.py +139 -0
  50. daal4py/sklearn/neighbors/_regression.py +74 -0
  51. daal4py/sklearn/neighbors/_unsupervised.py +55 -0
  52. daal4py/sklearn/neighbors/tests/test_kneighbors.py +113 -0
  53. daal4py/sklearn/svm/__init__.py +19 -0
  54. daal4py/sklearn/svm/svm.py +734 -0
  55. daal4py/sklearn/utils/__init__.py +21 -0
  56. daal4py/sklearn/utils/base.py +75 -0
  57. daal4py/sklearn/utils/tests/test_utils.py +51 -0
  58. daal4py/sklearn/utils/validation.py +696 -0
  59. onedal/__init__.py +83 -0
  60. onedal/_config.py +54 -0
  61. onedal/_device_offload.py +204 -0
  62. onedal/_onedal_py_dpc.cpython-313-x86_64-linux-gnu.so +0 -0
  63. onedal/_onedal_py_host.cpython-313-x86_64-linux-gnu.so +0 -0
  64. onedal/_onedal_py_spmd_dpc.cpython-313-x86_64-linux-gnu.so +0 -0
  65. onedal/basic_statistics/__init__.py +20 -0
  66. onedal/basic_statistics/basic_statistics.py +107 -0
  67. onedal/basic_statistics/incremental_basic_statistics.py +175 -0
  68. onedal/basic_statistics/tests/test_basic_statistics.py +242 -0
  69. onedal/basic_statistics/tests/test_incremental_basic_statistics.py +279 -0
  70. onedal/basic_statistics/tests/utils.py +50 -0
  71. onedal/cluster/__init__.py +27 -0
  72. onedal/cluster/dbscan.py +105 -0
  73. onedal/cluster/kmeans.py +557 -0
  74. onedal/cluster/kmeans_init.py +112 -0
  75. onedal/cluster/tests/test_dbscan.py +125 -0
  76. onedal/cluster/tests/test_kmeans.py +88 -0
  77. onedal/cluster/tests/test_kmeans_init.py +93 -0
  78. onedal/common/_base.py +38 -0
  79. onedal/common/_estimator_checks.py +47 -0
  80. onedal/common/_mixin.py +62 -0
  81. onedal/common/_policy.py +55 -0
  82. onedal/common/_spmd_policy.py +30 -0
  83. onedal/common/hyperparameters.py +125 -0
  84. onedal/common/tests/test_policy.py +76 -0
  85. onedal/common/tests/test_sycl.py +128 -0
  86. onedal/covariance/__init__.py +20 -0
  87. onedal/covariance/covariance.py +122 -0
  88. onedal/covariance/incremental_covariance.py +161 -0
  89. onedal/covariance/tests/test_covariance.py +50 -0
  90. onedal/covariance/tests/test_incremental_covariance.py +190 -0
  91. onedal/datatypes/__init__.py +19 -0
  92. onedal/datatypes/_data_conversion.py +121 -0
  93. onedal/datatypes/tests/common.py +126 -0
  94. onedal/datatypes/tests/test_data.py +475 -0
  95. onedal/decomposition/__init__.py +20 -0
  96. onedal/decomposition/incremental_pca.py +214 -0
  97. onedal/decomposition/pca.py +186 -0
  98. onedal/decomposition/tests/test_incremental_pca.py +285 -0
  99. onedal/ensemble/__init__.py +29 -0
  100. onedal/ensemble/forest.py +736 -0
  101. onedal/ensemble/tests/test_random_forest.py +97 -0
  102. onedal/linear_model/__init__.py +27 -0
  103. onedal/linear_model/incremental_linear_model.py +292 -0
  104. onedal/linear_model/linear_model.py +325 -0
  105. onedal/linear_model/logistic_regression.py +247 -0
  106. onedal/linear_model/tests/test_incremental_linear_regression.py +213 -0
  107. onedal/linear_model/tests/test_incremental_ridge_regression.py +171 -0
  108. onedal/linear_model/tests/test_linear_regression.py +259 -0
  109. onedal/linear_model/tests/test_logistic_regression.py +95 -0
  110. onedal/linear_model/tests/test_ridge.py +95 -0
  111. onedal/neighbors/__init__.py +19 -0
  112. onedal/neighbors/neighbors.py +763 -0
  113. onedal/neighbors/tests/test_knn_classification.py +49 -0
  114. onedal/primitives/__init__.py +27 -0
  115. onedal/primitives/get_tree.py +25 -0
  116. onedal/primitives/kernel_functions.py +152 -0
  117. onedal/primitives/tests/test_kernel_functions.py +159 -0
  118. onedal/spmd/__init__.py +25 -0
  119. onedal/spmd/_base.py +30 -0
  120. onedal/spmd/basic_statistics/__init__.py +20 -0
  121. onedal/spmd/basic_statistics/basic_statistics.py +30 -0
  122. onedal/spmd/basic_statistics/incremental_basic_statistics.py +71 -0
  123. onedal/spmd/cluster/__init__.py +28 -0
  124. onedal/spmd/cluster/dbscan.py +23 -0
  125. onedal/spmd/cluster/kmeans.py +56 -0
  126. onedal/spmd/covariance/__init__.py +20 -0
  127. onedal/spmd/covariance/covariance.py +26 -0
  128. onedal/spmd/covariance/incremental_covariance.py +83 -0
  129. onedal/spmd/decomposition/__init__.py +20 -0
  130. onedal/spmd/decomposition/incremental_pca.py +124 -0
  131. onedal/spmd/decomposition/pca.py +26 -0
  132. onedal/spmd/ensemble/__init__.py +19 -0
  133. onedal/spmd/ensemble/forest.py +28 -0
  134. onedal/spmd/linear_model/__init__.py +21 -0
  135. onedal/spmd/linear_model/incremental_linear_model.py +101 -0
  136. onedal/spmd/linear_model/linear_model.py +30 -0
  137. onedal/spmd/linear_model/logistic_regression.py +38 -0
  138. onedal/spmd/neighbors/__init__.py +19 -0
  139. onedal/spmd/neighbors/neighbors.py +75 -0
  140. onedal/svm/__init__.py +19 -0
  141. onedal/svm/svm.py +556 -0
  142. onedal/svm/tests/test_csr_svm.py +351 -0
  143. onedal/svm/tests/test_nusvc.py +204 -0
  144. onedal/svm/tests/test_nusvr.py +210 -0
  145. onedal/svm/tests/test_svc.py +176 -0
  146. onedal/svm/tests/test_svr.py +243 -0
  147. onedal/tests/test_common.py +57 -0
  148. onedal/tests/utils/_dataframes_support.py +162 -0
  149. onedal/tests/utils/_device_selection.py +102 -0
  150. onedal/utils/__init__.py +49 -0
  151. onedal/utils/_array_api.py +81 -0
  152. onedal/utils/_dpep_helpers.py +56 -0
  153. onedal/utils/tests/test_validation.py +142 -0
  154. onedal/utils/validation.py +464 -0
  155. scikit_learn_intelex-2025.4.0.dist-info/LICENSE.txt +202 -0
  156. scikit_learn_intelex-2025.4.0.dist-info/METADATA +190 -0
  157. scikit_learn_intelex-2025.4.0.dist-info/RECORD +282 -0
  158. scikit_learn_intelex-2025.4.0.dist-info/WHEEL +5 -0
  159. scikit_learn_intelex-2025.4.0.dist-info/top_level.txt +3 -0
  160. sklearnex/__init__.py +66 -0
  161. sklearnex/__main__.py +58 -0
  162. sklearnex/_config.py +116 -0
  163. sklearnex/_device_offload.py +126 -0
  164. sklearnex/_utils.py +177 -0
  165. sklearnex/basic_statistics/__init__.py +20 -0
  166. sklearnex/basic_statistics/basic_statistics.py +261 -0
  167. sklearnex/basic_statistics/incremental_basic_statistics.py +352 -0
  168. sklearnex/basic_statistics/tests/test_basic_statistics.py +405 -0
  169. sklearnex/basic_statistics/tests/test_incremental_basic_statistics.py +455 -0
  170. sklearnex/cluster/__init__.py +20 -0
  171. sklearnex/cluster/dbscan.py +197 -0
  172. sklearnex/cluster/k_means.py +397 -0
  173. sklearnex/cluster/tests/test_dbscan.py +38 -0
  174. sklearnex/cluster/tests/test_kmeans.py +157 -0
  175. sklearnex/conftest.py +82 -0
  176. sklearnex/covariance/__init__.py +19 -0
  177. sklearnex/covariance/incremental_covariance.py +405 -0
  178. sklearnex/covariance/tests/test_incremental_covariance.py +287 -0
  179. sklearnex/decomposition/__init__.py +19 -0
  180. sklearnex/decomposition/pca.py +427 -0
  181. sklearnex/decomposition/tests/test_pca.py +58 -0
  182. sklearnex/dispatcher.py +534 -0
  183. sklearnex/doc/third-party-programs.txt +424 -0
  184. sklearnex/ensemble/__init__.py +29 -0
  185. sklearnex/ensemble/_forest.py +2029 -0
  186. sklearnex/ensemble/tests/test_forest.py +140 -0
  187. sklearnex/glob/__main__.py +72 -0
  188. sklearnex/glob/dispatcher.py +101 -0
  189. sklearnex/linear_model/__init__.py +32 -0
  190. sklearnex/linear_model/coordinate_descent.py +30 -0
  191. sklearnex/linear_model/incremental_linear.py +495 -0
  192. sklearnex/linear_model/incremental_ridge.py +432 -0
  193. sklearnex/linear_model/linear.py +346 -0
  194. sklearnex/linear_model/logistic_regression.py +415 -0
  195. sklearnex/linear_model/ridge.py +390 -0
  196. sklearnex/linear_model/tests/test_incremental_linear.py +267 -0
  197. sklearnex/linear_model/tests/test_incremental_ridge.py +214 -0
  198. sklearnex/linear_model/tests/test_linear.py +142 -0
  199. sklearnex/linear_model/tests/test_logreg.py +134 -0
  200. sklearnex/linear_model/tests/test_ridge.py +256 -0
  201. sklearnex/manifold/__init__.py +19 -0
  202. sklearnex/manifold/t_sne.py +26 -0
  203. sklearnex/manifold/tests/test_tsne.py +250 -0
  204. sklearnex/metrics/__init__.py +23 -0
  205. sklearnex/metrics/pairwise.py +22 -0
  206. sklearnex/metrics/ranking.py +20 -0
  207. sklearnex/metrics/tests/test_metrics.py +39 -0
  208. sklearnex/model_selection/__init__.py +21 -0
  209. sklearnex/model_selection/split.py +22 -0
  210. sklearnex/model_selection/tests/test_model_selection.py +34 -0
  211. sklearnex/neighbors/__init__.py +27 -0
  212. sklearnex/neighbors/_lof.py +236 -0
  213. sklearnex/neighbors/common.py +310 -0
  214. sklearnex/neighbors/knn_classification.py +231 -0
  215. sklearnex/neighbors/knn_regression.py +207 -0
  216. sklearnex/neighbors/knn_unsupervised.py +178 -0
  217. sklearnex/neighbors/tests/test_neighbors.py +82 -0
  218. sklearnex/preview/__init__.py +17 -0
  219. sklearnex/preview/covariance/__init__.py +19 -0
  220. sklearnex/preview/covariance/covariance.py +142 -0
  221. sklearnex/preview/covariance/tests/test_covariance.py +66 -0
  222. sklearnex/preview/decomposition/__init__.py +19 -0
  223. sklearnex/preview/decomposition/incremental_pca.py +244 -0
  224. sklearnex/preview/decomposition/tests/test_incremental_pca.py +336 -0
  225. sklearnex/spmd/__init__.py +25 -0
  226. sklearnex/spmd/basic_statistics/__init__.py +20 -0
  227. sklearnex/spmd/basic_statistics/basic_statistics.py +21 -0
  228. sklearnex/spmd/basic_statistics/incremental_basic_statistics.py +30 -0
  229. sklearnex/spmd/basic_statistics/tests/test_basic_statistics_spmd.py +107 -0
  230. sklearnex/spmd/basic_statistics/tests/test_incremental_basic_statistics_spmd.py +306 -0
  231. sklearnex/spmd/cluster/__init__.py +30 -0
  232. sklearnex/spmd/cluster/dbscan.py +50 -0
  233. sklearnex/spmd/cluster/kmeans.py +21 -0
  234. sklearnex/spmd/cluster/tests/test_dbscan_spmd.py +97 -0
  235. sklearnex/spmd/cluster/tests/test_kmeans_spmd.py +173 -0
  236. sklearnex/spmd/covariance/__init__.py +20 -0
  237. sklearnex/spmd/covariance/covariance.py +21 -0
  238. sklearnex/spmd/covariance/incremental_covariance.py +37 -0
  239. sklearnex/spmd/covariance/tests/test_covariance_spmd.py +107 -0
  240. sklearnex/spmd/covariance/tests/test_incremental_covariance_spmd.py +184 -0
  241. sklearnex/spmd/decomposition/__init__.py +20 -0
  242. sklearnex/spmd/decomposition/incremental_pca.py +30 -0
  243. sklearnex/spmd/decomposition/pca.py +21 -0
  244. sklearnex/spmd/decomposition/tests/test_incremental_pca_spmd.py +269 -0
  245. sklearnex/spmd/decomposition/tests/test_pca_spmd.py +128 -0
  246. sklearnex/spmd/ensemble/__init__.py +19 -0
  247. sklearnex/spmd/ensemble/forest.py +71 -0
  248. sklearnex/spmd/ensemble/tests/test_forest_spmd.py +265 -0
  249. sklearnex/spmd/linear_model/__init__.py +21 -0
  250. sklearnex/spmd/linear_model/incremental_linear_model.py +35 -0
  251. sklearnex/spmd/linear_model/linear_model.py +21 -0
  252. sklearnex/spmd/linear_model/logistic_regression.py +21 -0
  253. sklearnex/spmd/linear_model/tests/test_incremental_linear_spmd.py +331 -0
  254. sklearnex/spmd/linear_model/tests/test_linear_regression_spmd.py +145 -0
  255. sklearnex/spmd/linear_model/tests/test_logistic_regression_spmd.py +162 -0
  256. sklearnex/spmd/neighbors/__init__.py +19 -0
  257. sklearnex/spmd/neighbors/neighbors.py +25 -0
  258. sklearnex/spmd/neighbors/tests/test_neighbors_spmd.py +288 -0
  259. sklearnex/svm/__init__.py +29 -0
  260. sklearnex/svm/_common.py +339 -0
  261. sklearnex/svm/nusvc.py +371 -0
  262. sklearnex/svm/nusvr.py +170 -0
  263. sklearnex/svm/svc.py +399 -0
  264. sklearnex/svm/svr.py +167 -0
  265. sklearnex/svm/tests/test_svm.py +93 -0
  266. sklearnex/tests/test_common.py +491 -0
  267. sklearnex/tests/test_config.py +123 -0
  268. sklearnex/tests/test_hyperparameters.py +43 -0
  269. sklearnex/tests/test_memory_usage.py +347 -0
  270. sklearnex/tests/test_monkeypatch.py +269 -0
  271. sklearnex/tests/test_n_jobs_support.py +108 -0
  272. sklearnex/tests/test_parallel.py +48 -0
  273. sklearnex/tests/test_patching.py +377 -0
  274. sklearnex/tests/test_run_to_run_stability.py +326 -0
  275. sklearnex/tests/utils/__init__.py +48 -0
  276. sklearnex/tests/utils/base.py +436 -0
  277. sklearnex/tests/utils/spmd.py +198 -0
  278. sklearnex/utils/__init__.py +19 -0
  279. sklearnex/utils/_array_api.py +82 -0
  280. sklearnex/utils/parallel.py +59 -0
  281. sklearnex/utils/tests/test_validation.py +238 -0
  282. sklearnex/utils/validation.py +208 -0
@@ -0,0 +1,696 @@
1
+ # ==============================================================================
2
+ # Copyright 2014 Intel Corporation
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ # ==============================================================================
16
+
17
+ import warnings
18
+ from contextlib import suppress
19
+
20
+ import numpy as np
21
+ import scipy.sparse as sp
22
+
23
+ if np.lib.NumpyVersion(np.__version__) >= np.lib.NumpyVersion("2.0.0a0"):
24
+ # numpy_version >= 2.0
25
+ from numpy.exceptions import ComplexWarning
26
+ else:
27
+ # numpy_version < 2.0
28
+ from numpy.core.numeric import ComplexWarning
29
+
30
+ from sklearn import get_config as _get_config
31
+ from sklearn.utils.extmath import _safe_accumulator_op
32
+ from sklearn.utils.fixes import _object_dtype_isnan
33
+ from sklearn.utils.validation import _assert_all_finite as _sklearn_assert_all_finite
34
+ from sklearn.utils.validation import (
35
+ _ensure_no_complex_data,
36
+ _ensure_sparse_format,
37
+ _num_samples,
38
+ check_consistent_length,
39
+ column_or_1d,
40
+ )
41
+
42
+ import daal4py as d4p
43
+
44
+ from .._utils import (
45
+ PatchingConditionsChain,
46
+ get_dtype,
47
+ get_number_of_types,
48
+ is_DataFrame,
49
+ sklearn_check_version,
50
+ )
51
+
52
+
53
def _assert_all_finite(
    X, allow_nan=False, msg_dtype=None, estimator_name=None, input_name=""
):
    """Validate that X contains no NaN or infinity values.

    Accelerated replacement for sklearn's ``_assert_all_finite``: for large
    enough float32/float64 inputs the check is offloaded to the oneDAL kernel
    ``daal_assert_all_finite``; otherwise it falls back to the stock
    scikit-learn implementation or plain NumPy checks.

    Parameters
    ----------
    X : array-like
        Data to validate. Heterogeneous pandas DataFrames are handled
        column-wise without densifying into a single array.
    allow_nan : bool, default=False
        If True, NaN values are accepted and only infinities are rejected.
    msg_dtype : dtype, default=None
        Dtype to report in the error message instead of X's actual dtype.
    estimator_name : str, default=None
        Forwarded to the stock sklearn fallback (sklearn >= 1.1 only).
    input_name : str, default=""
        Name of the input used in error messages.

    Raises
    ------
    ValueError
        If X contains NaN (when not allowed) or infinity.
    """
    # Respect sklearn's global config: validation is skipped entirely.
    if _get_config()["assume_finite"]:
        return

    # Data with small size has too big relative overhead
    # TODO: tune threshold size
    if hasattr(X, "size"):
        if X.size < 32768:
            if sklearn_check_version("1.1"):
                # sklearn >= 1.1 accepts estimator_name/input_name.
                _sklearn_assert_all_finite(
                    X,
                    allow_nan=allow_nan,
                    msg_dtype=msg_dtype,
                    estimator_name=estimator_name,
                    input_name=input_name,
                )
            else:
                _sklearn_assert_all_finite(X, allow_nan=allow_nan, msg_dtype=msg_dtype)
            return

    is_df = is_DataFrame(X)
    num_of_types = get_number_of_types(X)

    # if X is a heterogeneous pandas.DataFrame then
    # convert it to a list of C-contiguous per-column arrays
    if is_df and num_of_types > 1:
        lst = []
        for idx in X:
            arr = X[idx].to_numpy()
            lst.append(arr if arr.flags["C_CONTIGUOUS"] else np.ascontiguousarray(arr))
    else:
        X = np.asanyarray(X)
        is_df = False

    dt = np.dtype(get_dtype(X))
    is_float = dt.kind in "fc"  # float or complex

    msg_err = "Input {} contains {} or a value too large for {!r}."
    type_err = "infinity" if allow_nan else "NaN, infinity"
    err = msg_err.format(input_name, type_err, msg_dtype if msg_dtype is not None else dt)

    # Decide (and log) whether the oneDAL kernel can be used for this input:
    # 1-D/2-D, non-empty, and float32/float64 only.
    _patching_status = PatchingConditionsChain(
        "sklearn.utils.validation._assert_all_finite"
    )
    _dal_ready = _patching_status.and_conditions(
        [
            (X.ndim in [1, 2], f"Input {input_name} does not have 1 or 2 dimensions."),
            (not np.any(np.equal(X.shape, 0)), f"Input {input_name} shape contains a 0."),
            (
                dt in [np.float32, np.float64],
                f"Input {input_name} dtype is not float32 or float64.",
            ),
        ]
    )
    _patching_status.write_log()
    if _dal_ready:
        if X.ndim == 1:
            X = X.reshape((-1, 1))

        # For heterogeneous DataFrames pass the per-column list built above.
        x_for_daal = lst if is_df and num_of_types > 1 else X

        # The trailing int selects the kernel variant: 0 for float64 input,
        # 1 for float32; the kernel returns falsy when a bad value is found.
        if dt == np.float64:
            if not d4p.daal_assert_all_finite(x_for_daal, allow_nan, 0):
                raise ValueError(err)
        elif dt == np.float32:
            if not d4p.daal_assert_all_finite(x_for_daal, allow_nan, 1):
                raise ValueError(err)
    # First try an O(n) time, O(1) space solution for the common case that
    # everything is finite; fall back to O(n) space np.isfinite to prevent
    # false positives from overflow in sum method. The sum is also calculated
    # safely to reduce dtype induced overflows.
    elif is_float and (np.isfinite(_safe_accumulator_op(np.sum, X))):
        pass
    elif is_float:
        # note: `and` binds tighter than `or`, so this reads as
        # (allow_nan and isinf-any) or (not allow_nan and not isfinite-all)
        if allow_nan and np.isinf(X).any() or not allow_nan and not np.isfinite(X).all():
            raise ValueError(err)
    # for object dtype data, we only check for NaNs (GH-13254)
    elif dt == np.dtype("object") and not allow_nan:
        if _object_dtype_isnan(X).any():
            raise ValueError(f"Input {input_name} contains NaN")
135
+
136
+
137
+ def _pandas_check_array(
138
+ array,
139
+ array_orig,
140
+ force_all_finite,
141
+ ensure_min_samples,
142
+ ensure_min_features,
143
+ copy,
144
+ context,
145
+ ):
146
+ if force_all_finite:
147
+ _assert_all_finite(array, allow_nan=force_all_finite == "allow-nan")
148
+
149
+ if ensure_min_samples > 0:
150
+ n_samples = _num_samples(array)
151
+ if n_samples < ensure_min_samples:
152
+ raise ValueError(
153
+ "Found array with %d sample(s) (shape=%s) while a"
154
+ " minimum of %d is required%s."
155
+ % (n_samples, array.shape, ensure_min_samples, context)
156
+ )
157
+
158
+ if ensure_min_features > 0:
159
+ n_features = array.shape[1]
160
+ if n_features < ensure_min_features:
161
+ raise ValueError(
162
+ "Found array with %d feature(s) (shape=%s) while"
163
+ " a minimum of %d is required%s."
164
+ % (n_features, array.shape, ensure_min_features, context)
165
+ )
166
+
167
+ if copy and np.may_share_memory(array, array_orig):
168
+ array = array.copy()
169
+
170
+ return array
171
+
172
+
173
def _daal_check_array(
    array,
    accept_sparse=False,
    *,
    accept_large_sparse=True,
    dtype="numeric",
    order=None,
    copy=False,
    force_all_finite=True,
    ensure_2d=True,
    allow_nd=False,
    ensure_min_samples=1,
    ensure_min_features=1,
    estimator=None,
):
    """Input validation on an array, list, sparse matrix or similar.

    By default, the input is checked to be a non-empty 2D array containing
    only finite values. If the dtype of the array is object, attempt
    converting to float, raising on failure.

    Parameters
    ----------
    array : object
        Input object to check / convert.

    accept_sparse : string, boolean or list/tuple of strings (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.

    accept_large_sparse : bool (default=True)
        If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
        accept_sparse, accept_large_sparse=False will cause it to be accepted
        only if its indices are stored with a 32-bit dtype.

        .. versionadded:: 0.20

    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
        When order is None (default), then if copy=False, nothing is ensured
        about the memory layout of the output array; otherwise (copy=True)
        the memory layout of the returned array is kept as close as possible
        to the original array.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean or 'allow-nan', (default=True)
        Whether to raise an error on np.inf, np.nan, pd.NA in array. The
        possibilities are:

        - True: Force all values of array to be finite.
        - False: accepts np.inf, np.nan, pd.NA in array.
        - 'allow-nan': accepts only np.nan and pd.NA values in array. Values
          cannot be infinite.

        .. versionadded:: 0.20
           ``force_all_finite`` accepts the string ``'allow-nan'``.

        .. versionchanged:: 0.23
           Accepts `pd.NA` and converts it into `np.nan`

    ensure_2d : boolean (default=True)
        Whether to raise a value error if array is not 2D.

    allow_nd : boolean (default=False)
        Whether to allow array.ndim > 2.

    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.

    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.

    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.

    Returns
    -------
    array_converted : object
        The converted and validated array.
    """
    if force_all_finite not in (True, False, "allow-nan"):
        raise ValueError(
            'force_all_finite should be a bool or "allow-nan"'
            ". Got {!r} instead".format(force_all_finite)
        )

    if estimator is not None:
        if isinstance(estimator, str):
            estimator_name = estimator
        else:
            estimator_name = estimator.__class__.__name__
    else:
        estimator_name = "Estimator"
    context = " by %s" % estimator_name if estimator is not None else ""

    array_orig = array

    # a branch for heterogeneous pandas.DataFrame: validate it column-wise
    # (no densification) unless it mixes sparse and dense columns
    if is_DataFrame(array) and get_number_of_types(array) > 1:
        from pandas import SparseDtype

        def is_sparse(dtype):
            return isinstance(dtype, SparseDtype)

        if hasattr(array, "sparse") or not array.dtypes.apply(is_sparse).any():
            return _pandas_check_array(
                array,
                array_orig,
                force_all_finite,
                ensure_min_samples,
                ensure_min_features,
                copy,
                context,
            )

    # store whether originally we wanted numeric dtype
    dtype_numeric = isinstance(dtype, str) and dtype == "numeric"

    dtype_orig = getattr(array, "dtype", None)
    if not hasattr(dtype_orig, "kind"):
        # not a data type (e.g. a column named dtype in a pandas DataFrame)
        dtype_orig = None

    # check if the object contains several dtypes (typically a pandas
    # DataFrame), and store them. If not, store None.
    dtypes_orig = None
    has_pd_integer_array = False
    if hasattr(array, "dtypes") and hasattr(array.dtypes, "__array__"):
        # throw warning if columns are sparse. If all columns are sparse, then
        # array.sparse exists and sparsity will be preserved (later).
        with suppress(ImportError):
            from pandas import SparseDtype

            def is_sparse(dtype):
                return isinstance(dtype, SparseDtype)

            if not hasattr(array, "sparse") and array.dtypes.apply(is_sparse).any():
                warnings.warn(
                    "pandas.DataFrame with sparse columns found."
                    "It will be converted to a dense numpy array."
                )

        dtypes_orig = list(array.dtypes)
        # pandas boolean dtype __array__ interface coerces bools to objects
        for i, dtype_iter in enumerate(dtypes_orig):
            if dtype_iter.kind == "b":
                # Fix: the ``np.object`` alias was removed in NumPy 1.24;
                # the builtin ``object`` yields the identical dtype.
                dtypes_orig[i] = np.dtype(object)
            elif dtype_iter.name.startswith(("Int", "UInt")):
                # name looks like an Integer Extension Array, now check for
                # the dtype
                with suppress(ImportError):
                    from pandas import (
                        Int8Dtype,
                        Int16Dtype,
                        Int32Dtype,
                        Int64Dtype,
                        UInt8Dtype,
                        UInt16Dtype,
                        UInt32Dtype,
                        UInt64Dtype,
                    )

                    if isinstance(
                        dtype_iter,
                        (
                            Int8Dtype,
                            Int16Dtype,
                            Int32Dtype,
                            Int64Dtype,
                            UInt8Dtype,
                            UInt16Dtype,
                            UInt32Dtype,
                            UInt64Dtype,
                        ),
                    ):
                        has_pd_integer_array = True

        if all(isinstance(dtype, np.dtype) for dtype in dtypes_orig):
            dtype_orig = np.result_type(*dtypes_orig)

    if dtype_numeric:
        if dtype_orig is not None and dtype_orig.kind == "O":
            # if input is object, convert to float.
            dtype = np.float64
        else:
            dtype = None

    if isinstance(dtype, (list, tuple)):
        if dtype_orig is not None and dtype_orig in dtype:
            # no dtype conversion required
            dtype = None
        else:
            # dtype conversion required. Let's select the first element of the
            # list of accepted types.
            dtype = dtype[0]

    if has_pd_integer_array:
        # If there are any pandas integer extension arrays,
        # convert them (handles pd.NA) before the generic ndarray path.
        array = array.astype(dtype)

    # When all dataframe columns are sparse, convert to a sparse array
    if hasattr(array, "sparse") and array.ndim > 1:
        # DataFrame.sparse only supports `to_coo`
        array = array.sparse.to_coo()

    if sp.issparse(array):
        _ensure_no_complex_data(array)
        # _ensure_sparse_format also performs the finiteness check for
        # sparse inputs (force_all_finite is forwarded).
        array = _ensure_sparse_format(
            array,
            accept_sparse=accept_sparse,
            dtype=dtype,
            copy=copy,
            force_all_finite=force_all_finite,
            accept_large_sparse=accept_large_sparse,
        )
    else:
        # If np.array(..) gives ComplexWarning, then we convert the warning
        # to an error. This is needed because specifying a non complex
        # dtype to the function converts complex to real dtype,
        # thereby passing the test made in the lines following the scope
        # of warnings context manager.
        with warnings.catch_warnings():
            try:
                warnings.simplefilter("error", ComplexWarning)
                if dtype is not None and np.dtype(dtype).kind in "iu":
                    # Conversion float -> int should not contain NaN or
                    # inf (numpy#14412). We cannot use casting='safe' because
                    # then conversion float -> int would be disallowed.
                    array = np.asarray(array, order=order)
                    if array.dtype.kind == "f":
                        _assert_all_finite(array, allow_nan=False, msg_dtype=dtype)
                    array = array.astype(dtype, casting="unsafe", copy=False)
                else:
                    array = np.asarray(array, order=order, dtype=dtype)
            except ComplexWarning:
                raise ValueError("Complex data not supported\n" "{}\n".format(array))

        # It is possible that the np.array(..) gave no warning. This happens
        # when no dtype conversion happened, for example dtype = None. The
        # result is that np.array(..) produces an array of complex dtype
        # and we need to catch and raise exception for such cases.
        _ensure_no_complex_data(array)  # doing nothing for DataFrame

        if ensure_2d:
            # If input is scalar raise error
            if array.ndim == 0:
                raise ValueError(
                    "Expected 2D array, got scalar array instead:\narray={}.\n"
                    "Reshape your data either using array.reshape(-1, 1) if "
                    "your data has a single feature or array.reshape(1, -1) "
                    "if it contains a single sample.".format(array)
                )
            # If input is 1D raise error
            if array.ndim == 1:
                raise ValueError(
                    "Expected 2D array, got 1D array instead:\narray={}.\n"
                    "Reshape your data either using array.reshape(-1, 1) if "
                    "your data has a single feature or array.reshape(1, -1) "
                    "if it contains a single sample.".format(array)
                )

        # in the future np.flexible dtypes will be handled like object dtypes
        if dtype_numeric and np.issubdtype(array.dtype, np.flexible):
            warnings.warn(
                "Beginning in version 0.22, arrays of bytes/strings will be "
                "converted to decimal numbers if dtype='numeric'. "
                "It is recommended that you convert the array to "
                "a float dtype before using it in scikit-learn, "
                "for example by using "
                "your_array = your_array.astype(np.float64).",
                FutureWarning,
                stacklevel=2,
            )

        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError(
                "Found array with dim %d. %s expected <= 2."
                % (array.ndim, estimator_name)
            )

        if force_all_finite:
            _assert_all_finite(array, allow_nan=force_all_finite == "allow-nan")

    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError(
                "Found array with %d sample(s) (shape=%s) while a"
                " minimum of %d is required%s."
                % (n_samples, array.shape, ensure_min_samples, context)
            )

    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError(
                "Found array with %d feature(s) (shape=%s) while"
                " a minimum of %d is required%s."
                % (n_features, array.shape, ensure_min_features, context)
            )

    if copy and np.may_share_memory(array, array_orig):
        array = np.array(array, dtype=dtype, order=order)

    return array
498
+
499
+
500
def _daal_check_X_y(
    X,
    y,
    accept_sparse=False,
    *,
    accept_large_sparse=True,
    dtype="numeric",
    order=None,
    copy=False,
    force_all_finite=True,
    ensure_2d=True,
    allow_nd=False,
    multi_output=False,
    ensure_min_samples=1,
    ensure_min_features=1,
    y_numeric=False,
    estimator=None,
):
    """Validate X and y together for a standard estimator.

    X is run through ``_daal_check_array`` with the full battery of
    array checks (sparse format, dtype coercion, dimensionality,
    finiteness, minimum samples/features).  y is validated either as a
    possibly-sparse 2D target (``multi_output=True``) or coerced to a
    finite 1D vector.  Finally both are checked for consistent length.

    Parameters
    ----------
    X : nd-array, list or sparse matrix
        Input data.

    y : nd-array, list or sparse matrix
        Labels.

    accept_sparse : string, boolean or list of string (default=False)
        Allowed sparse matrix formats ('csr', 'csc', ...).  True accepts
        any format; False raises on sparse input.  A sparse input not in
        the allowed list is converted to the first listed format.

    accept_large_sparse : bool (default=True)
        When False, sparse input accepted via ``accept_sparse`` must use
        32-bit indices.

    dtype : string, type, list of types or None (default="numeric")
        Target dtype of the result.  None preserves the input dtype;
        "numeric" preserves it unless it is object; a list converts to
        its first entry only when the input dtype is not in the list.

    order : 'F', 'C' or None (default=None)
        Memory layout to force on the returned array.

    copy : boolean (default=False)
        Force a copy.  Even with ``copy=False`` a conversion may copy.

    force_all_finite : boolean or 'allow-nan', (default=True)
        Finiteness policy for X only (y is always checked):
        True forbids inf/NaN/pd.NA; False allows all;
        'allow-nan' allows NaN/pd.NA but not inf.

    ensure_2d : boolean (default=True)
        Raise if X is not 2D.

    allow_nd : boolean (default=False)
        Allow X.ndim > 2.

    multi_output : boolean (default=False)
        Allow 2D (array or sparse) y.  When False, y is validated as a
        1D vector and may not contain NaN or inf.

    ensure_min_samples : int (default=1)
        Minimum number of rows required in X.

    ensure_min_features : int (default=1)
        Minimum number of columns required when X ends up 2D.
        0 disables the check.

    y_numeric : boolean (default=False)
        Convert object-dtype y to float64 (regression targets).

    estimator : str or estimator instance (default=None)
        Name used to contextualize error/warning messages.

    Returns
    -------
    X_converted : object
        The converted and validated X.

    y_converted : object
        The converted and validated y.
    """
    # A target is mandatory here; unsupervised paths validate X alone.
    if y is None:
        raise ValueError("y cannot be None")

    X = _daal_check_array(
        X,
        accept_sparse=accept_sparse,
        accept_large_sparse=accept_large_sparse,
        dtype=dtype,
        order=order,
        copy=copy,
        force_all_finite=force_all_finite,
        ensure_2d=ensure_2d,
        allow_nd=allow_nd,
        ensure_min_samples=ensure_min_samples,
        ensure_min_features=ensure_min_features,
        estimator=estimator,
    )

    if not multi_output:
        # Coerce y to a 1D vector; reject non-finite targets.
        y = column_or_1d(y, warn=True)
        _assert_all_finite(y)
    else:
        # 2D / sparse targets allowed; input dtype preserved.
        y = _daal_check_array(
            y, accept_sparse="csr", force_all_finite=True, ensure_2d=False, dtype=None
        )
    if y_numeric and hasattr(y, "dtype") and y.dtype.kind == "O":
        # Object-dtype targets become float64 for regression estimators.
        y = y.astype(np.float64)

    check_consistent_length(X, y)

    return X, y
646
+
647
+
648
+ def _daal_num_features(X):
649
+ """Return the number of features in an array-like X.
650
+ This helper function tries hard to avoid to materialize an array version
651
+ of X unless necessary. For instance, if X is a list of lists,
652
+ this function will return the length of the first element, assuming
653
+ that subsequent elements are all lists of the same length without
654
+ checking.
655
+ Parameters
656
+ ----------
657
+ X : array-like
658
+ array-like to get the number of features.
659
+ Returns
660
+ -------
661
+ features : int
662
+ Number of features
663
+ """
664
+ type_ = type(X)
665
+ if type_.__module__ == "builtins":
666
+ type_name = type_.__qualname__
667
+ else:
668
+ type_name = f"{type_.__module__}.{type_.__qualname__}"
669
+ message = f"Unable to find the number of features from X of type {type_name}"
670
+ if not hasattr(X, "__len__") and not hasattr(X, "shape"):
671
+ if not hasattr(X, "__array__"):
672
+ raise TypeError(message)
673
+ # Only convert X to a numpy array if there is no cheaper, heuristic
674
+ # option.
675
+ X = np.asarray(X)
676
+
677
+ if hasattr(X, "shape"):
678
+ if not hasattr(X.shape, "__len__") or len(X.shape) <= 1:
679
+ message += f" with shape {X.shape}"
680
+ raise TypeError(message)
681
+ return X.shape[1]
682
+
683
+ first_sample = X[0]
684
+
685
+ # Do not consider an array-like of strings or dicts to be a 2D array
686
+ if isinstance(first_sample, (str, bytes, dict)):
687
+ message += f" where the samples are of type {type(first_sample).__qualname__}"
688
+ raise TypeError(message)
689
+
690
+ try:
691
+ # If X is a list of lists, for instance, we assume that all nested
692
+ # lists have the same length without checking or converting to
693
+ # a numpy array to keep this function call as cheap as possible.
694
+ return len(first_sample)
695
+ except Exception as err:
696
+ raise TypeError(message) from err