scikit-learn-intelex 2025.0.0__py39-none-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of scikit-learn-intelex might be problematic. See the release details for more information.
- daal4py/__init__.py +73 -0
- daal4py/__main__.py +58 -0
- daal4py/_daal4py.cpython-39-x86_64-linux-gnu.so +0 -0
- daal4py/doc/third-party-programs.txt +424 -0
- daal4py/mb/__init__.py +19 -0
- daal4py/mb/model_builders.py +377 -0
- daal4py/mpi_transceiver.cpython-39-x86_64-linux-gnu.so +0 -0
- daal4py/sklearn/__init__.py +40 -0
- daal4py/sklearn/_n_jobs_support.py +242 -0
- daal4py/sklearn/_utils.py +241 -0
- daal4py/sklearn/cluster/__init__.py +20 -0
- daal4py/sklearn/cluster/dbscan.py +165 -0
- daal4py/sklearn/cluster/k_means.py +597 -0
- daal4py/sklearn/cluster/tests/test_dbscan.py +109 -0
- daal4py/sklearn/decomposition/__init__.py +19 -0
- daal4py/sklearn/decomposition/_pca.py +524 -0
- daal4py/sklearn/ensemble/AdaBoostClassifier.py +192 -0
- daal4py/sklearn/ensemble/GBTDAAL.py +318 -0
- daal4py/sklearn/ensemble/__init__.py +27 -0
- daal4py/sklearn/ensemble/_forest.py +1397 -0
- daal4py/sklearn/ensemble/tests/test_decision_forest.py +206 -0
- daal4py/sklearn/linear_model/__init__.py +29 -0
- daal4py/sklearn/linear_model/_coordinate_descent.py +848 -0
- daal4py/sklearn/linear_model/_linear.py +272 -0
- daal4py/sklearn/linear_model/_ridge.py +325 -0
- daal4py/sklearn/linear_model/coordinate_descent.py +17 -0
- daal4py/sklearn/linear_model/linear.py +17 -0
- daal4py/sklearn/linear_model/logistic_loss.py +195 -0
- daal4py/sklearn/linear_model/logistic_path.py +1026 -0
- daal4py/sklearn/linear_model/ridge.py +17 -0
- daal4py/sklearn/linear_model/tests/test_linear.py +196 -0
- daal4py/sklearn/linear_model/tests/test_ridge.py +69 -0
- daal4py/sklearn/manifold/__init__.py +19 -0
- daal4py/sklearn/manifold/_t_sne.py +405 -0
- daal4py/sklearn/metrics/__init__.py +20 -0
- daal4py/sklearn/metrics/_pairwise.py +155 -0
- daal4py/sklearn/metrics/_ranking.py +210 -0
- daal4py/sklearn/model_selection/__init__.py +19 -0
- daal4py/sklearn/model_selection/_split.py +309 -0
- daal4py/sklearn/model_selection/tests/test_split.py +56 -0
- daal4py/sklearn/monkeypatch/__init__.py +0 -0
- daal4py/sklearn/monkeypatch/dispatcher.py +232 -0
- daal4py/sklearn/monkeypatch/tests/_models_info.py +161 -0
- daal4py/sklearn/monkeypatch/tests/test_monkeypatch.py +71 -0
- daal4py/sklearn/monkeypatch/tests/test_patching.py +87 -0
- daal4py/sklearn/monkeypatch/tests/utils/_launch_algorithms.py +118 -0
- daal4py/sklearn/neighbors/__init__.py +21 -0
- daal4py/sklearn/neighbors/_base.py +503 -0
- daal4py/sklearn/neighbors/_classification.py +139 -0
- daal4py/sklearn/neighbors/_regression.py +74 -0
- daal4py/sklearn/neighbors/_unsupervised.py +55 -0
- daal4py/sklearn/neighbors/tests/test_kneighbors.py +113 -0
- daal4py/sklearn/svm/__init__.py +19 -0
- daal4py/sklearn/svm/svm.py +734 -0
- daal4py/sklearn/utils/__init__.py +21 -0
- daal4py/sklearn/utils/base.py +75 -0
- daal4py/sklearn/utils/tests/test_utils.py +51 -0
- daal4py/sklearn/utils/validation.py +693 -0
- onedal/__init__.py +83 -0
- onedal/_config.py +53 -0
- onedal/_device_offload.py +229 -0
- onedal/_onedal_py_dpc.cpython-39-x86_64-linux-gnu.so +0 -0
- onedal/_onedal_py_host.cpython-39-x86_64-linux-gnu.so +0 -0
- onedal/_onedal_py_spmd_dpc.cpython-39-x86_64-linux-gnu.so +0 -0
- onedal/basic_statistics/__init__.py +20 -0
- onedal/basic_statistics/basic_statistics.py +107 -0
- onedal/basic_statistics/incremental_basic_statistics.py +160 -0
- onedal/basic_statistics/tests/test_basic_statistics.py +298 -0
- onedal/basic_statistics/tests/test_incremental_basic_statistics.py +196 -0
- onedal/cluster/__init__.py +27 -0
- onedal/cluster/dbscan.py +110 -0
- onedal/cluster/kmeans.py +560 -0
- onedal/cluster/kmeans_init.py +115 -0
- onedal/cluster/tests/test_dbscan.py +125 -0
- onedal/cluster/tests/test_kmeans.py +88 -0
- onedal/cluster/tests/test_kmeans_init.py +93 -0
- onedal/common/_base.py +38 -0
- onedal/common/_estimator_checks.py +47 -0
- onedal/common/_mixin.py +62 -0
- onedal/common/_policy.py +59 -0
- onedal/common/_spmd_policy.py +30 -0
- onedal/common/hyperparameters.py +116 -0
- onedal/common/tests/test_policy.py +75 -0
- onedal/covariance/__init__.py +20 -0
- onedal/covariance/covariance.py +125 -0
- onedal/covariance/incremental_covariance.py +146 -0
- onedal/covariance/tests/test_covariance.py +50 -0
- onedal/covariance/tests/test_incremental_covariance.py +122 -0
- onedal/datatypes/__init__.py +19 -0
- onedal/datatypes/_data_conversion.py +95 -0
- onedal/datatypes/tests/test_data.py +235 -0
- onedal/decomposition/__init__.py +20 -0
- onedal/decomposition/incremental_pca.py +204 -0
- onedal/decomposition/pca.py +186 -0
- onedal/decomposition/tests/test_incremental_pca.py +198 -0
- onedal/ensemble/__init__.py +29 -0
- onedal/ensemble/forest.py +720 -0
- onedal/ensemble/tests/test_random_forest.py +97 -0
- onedal/linear_model/__init__.py +27 -0
- onedal/linear_model/incremental_linear_model.py +258 -0
- onedal/linear_model/linear_model.py +329 -0
- onedal/linear_model/logistic_regression.py +249 -0
- onedal/linear_model/tests/test_incremental_linear_regression.py +168 -0
- onedal/linear_model/tests/test_incremental_ridge_regression.py +107 -0
- onedal/linear_model/tests/test_linear_regression.py +149 -0
- onedal/linear_model/tests/test_logistic_regression.py +95 -0
- onedal/linear_model/tests/test_ridge.py +95 -0
- onedal/neighbors/__init__.py +19 -0
- onedal/neighbors/neighbors.py +778 -0
- onedal/neighbors/tests/test_knn_classification.py +49 -0
- onedal/primitives/__init__.py +27 -0
- onedal/primitives/get_tree.py +25 -0
- onedal/primitives/kernel_functions.py +153 -0
- onedal/primitives/tests/test_kernel_functions.py +159 -0
- onedal/spmd/__init__.py +25 -0
- onedal/spmd/_base.py +30 -0
- onedal/spmd/basic_statistics/__init__.py +20 -0
- onedal/spmd/basic_statistics/basic_statistics.py +30 -0
- onedal/spmd/basic_statistics/incremental_basic_statistics.py +69 -0
- onedal/spmd/cluster/__init__.py +28 -0
- onedal/spmd/cluster/dbscan.py +23 -0
- onedal/spmd/cluster/kmeans.py +56 -0
- onedal/spmd/covariance/__init__.py +20 -0
- onedal/spmd/covariance/covariance.py +26 -0
- onedal/spmd/covariance/incremental_covariance.py +82 -0
- onedal/spmd/decomposition/__init__.py +20 -0
- onedal/spmd/decomposition/incremental_pca.py +117 -0
- onedal/spmd/decomposition/pca.py +26 -0
- onedal/spmd/ensemble/__init__.py +19 -0
- onedal/spmd/ensemble/forest.py +28 -0
- onedal/spmd/linear_model/__init__.py +21 -0
- onedal/spmd/linear_model/incremental_linear_model.py +97 -0
- onedal/spmd/linear_model/linear_model.py +30 -0
- onedal/spmd/linear_model/logistic_regression.py +38 -0
- onedal/spmd/neighbors/__init__.py +19 -0
- onedal/spmd/neighbors/neighbors.py +75 -0
- onedal/svm/__init__.py +19 -0
- onedal/svm/svm.py +556 -0
- onedal/svm/tests/test_csr_svm.py +351 -0
- onedal/svm/tests/test_nusvc.py +204 -0
- onedal/svm/tests/test_nusvr.py +210 -0
- onedal/svm/tests/test_svc.py +168 -0
- onedal/svm/tests/test_svr.py +243 -0
- onedal/tests/test_common.py +41 -0
- onedal/tests/utils/_dataframes_support.py +168 -0
- onedal/tests/utils/_device_selection.py +107 -0
- onedal/utils/__init__.py +49 -0
- onedal/utils/_array_api.py +91 -0
- onedal/utils/validation.py +432 -0
- scikit_learn_intelex-2025.0.0.dist-info/LICENSE.txt +202 -0
- scikit_learn_intelex-2025.0.0.dist-info/METADATA +231 -0
- scikit_learn_intelex-2025.0.0.dist-info/RECORD +278 -0
- scikit_learn_intelex-2025.0.0.dist-info/WHEEL +5 -0
- scikit_learn_intelex-2025.0.0.dist-info/top_level.txt +3 -0
- sklearnex/__init__.py +65 -0
- sklearnex/__main__.py +58 -0
- sklearnex/_config.py +98 -0
- sklearnex/_device_offload.py +121 -0
- sklearnex/_utils.py +109 -0
- sklearnex/basic_statistics/__init__.py +20 -0
- sklearnex/basic_statistics/basic_statistics.py +140 -0
- sklearnex/basic_statistics/incremental_basic_statistics.py +288 -0
- sklearnex/basic_statistics/tests/test_basic_statistics.py +251 -0
- sklearnex/basic_statistics/tests/test_incremental_basic_statistics.py +384 -0
- sklearnex/cluster/__init__.py +20 -0
- sklearnex/cluster/dbscan.py +192 -0
- sklearnex/cluster/k_means.py +383 -0
- sklearnex/cluster/tests/test_dbscan.py +38 -0
- sklearnex/cluster/tests/test_kmeans.py +153 -0
- sklearnex/conftest.py +73 -0
- sklearnex/covariance/__init__.py +19 -0
- sklearnex/covariance/incremental_covariance.py +368 -0
- sklearnex/covariance/tests/test_incremental_covariance.py +226 -0
- sklearnex/decomposition/__init__.py +19 -0
- sklearnex/decomposition/pca.py +414 -0
- sklearnex/decomposition/tests/test_pca.py +58 -0
- sklearnex/dispatcher.py +543 -0
- sklearnex/doc/third-party-programs.txt +424 -0
- sklearnex/ensemble/__init__.py +29 -0
- sklearnex/ensemble/_forest.py +2016 -0
- sklearnex/ensemble/tests/test_forest.py +120 -0
- sklearnex/glob/__main__.py +72 -0
- sklearnex/glob/dispatcher.py +101 -0
- sklearnex/linear_model/__init__.py +32 -0
- sklearnex/linear_model/coordinate_descent.py +30 -0
- sklearnex/linear_model/incremental_linear.py +463 -0
- sklearnex/linear_model/incremental_ridge.py +418 -0
- sklearnex/linear_model/linear.py +302 -0
- sklearnex/linear_model/logistic_path.py +17 -0
- sklearnex/linear_model/logistic_regression.py +403 -0
- sklearnex/linear_model/ridge.py +24 -0
- sklearnex/linear_model/tests/test_incremental_linear.py +203 -0
- sklearnex/linear_model/tests/test_incremental_ridge.py +153 -0
- sklearnex/linear_model/tests/test_linear.py +142 -0
- sklearnex/linear_model/tests/test_logreg.py +134 -0
- sklearnex/manifold/__init__.py +19 -0
- sklearnex/manifold/t_sne.py +21 -0
- sklearnex/manifold/tests/test_tsne.py +26 -0
- sklearnex/metrics/__init__.py +23 -0
- sklearnex/metrics/pairwise.py +22 -0
- sklearnex/metrics/ranking.py +20 -0
- sklearnex/metrics/tests/test_metrics.py +39 -0
- sklearnex/model_selection/__init__.py +21 -0
- sklearnex/model_selection/split.py +22 -0
- sklearnex/model_selection/tests/test_model_selection.py +34 -0
- sklearnex/neighbors/__init__.py +27 -0
- sklearnex/neighbors/_lof.py +231 -0
- sklearnex/neighbors/common.py +310 -0
- sklearnex/neighbors/knn_classification.py +226 -0
- sklearnex/neighbors/knn_regression.py +203 -0
- sklearnex/neighbors/knn_unsupervised.py +170 -0
- sklearnex/neighbors/tests/test_neighbors.py +80 -0
- sklearnex/preview/__init__.py +17 -0
- sklearnex/preview/covariance/__init__.py +19 -0
- sklearnex/preview/covariance/covariance.py +133 -0
- sklearnex/preview/covariance/tests/test_covariance.py +66 -0
- sklearnex/preview/decomposition/__init__.py +19 -0
- sklearnex/preview/decomposition/incremental_pca.py +228 -0
- sklearnex/preview/decomposition/tests/test_incremental_pca.py +266 -0
- sklearnex/preview/linear_model/__init__.py +19 -0
- sklearnex/preview/linear_model/ridge.py +419 -0
- sklearnex/preview/linear_model/tests/test_ridge.py +102 -0
- sklearnex/spmd/__init__.py +25 -0
- sklearnex/spmd/basic_statistics/__init__.py +20 -0
- sklearnex/spmd/basic_statistics/basic_statistics.py +21 -0
- sklearnex/spmd/basic_statistics/incremental_basic_statistics.py +30 -0
- sklearnex/spmd/basic_statistics/tests/test_basic_statistics_spmd.py +107 -0
- sklearnex/spmd/basic_statistics/tests/test_incremental_basic_statistics_spmd.py +307 -0
- sklearnex/spmd/cluster/__init__.py +30 -0
- sklearnex/spmd/cluster/dbscan.py +50 -0
- sklearnex/spmd/cluster/kmeans.py +21 -0
- sklearnex/spmd/cluster/tests/test_dbscan_spmd.py +97 -0
- sklearnex/spmd/cluster/tests/test_kmeans_spmd.py +172 -0
- sklearnex/spmd/covariance/__init__.py +20 -0
- sklearnex/spmd/covariance/covariance.py +21 -0
- sklearnex/spmd/covariance/incremental_covariance.py +37 -0
- sklearnex/spmd/covariance/tests/test_covariance_spmd.py +107 -0
- sklearnex/spmd/covariance/tests/test_incremental_covariance_spmd.py +184 -0
- sklearnex/spmd/decomposition/__init__.py +20 -0
- sklearnex/spmd/decomposition/incremental_pca.py +30 -0
- sklearnex/spmd/decomposition/pca.py +21 -0
- sklearnex/spmd/decomposition/tests/test_incremental_pca_spmd.py +269 -0
- sklearnex/spmd/decomposition/tests/test_pca_spmd.py +128 -0
- sklearnex/spmd/ensemble/__init__.py +19 -0
- sklearnex/spmd/ensemble/forest.py +71 -0
- sklearnex/spmd/ensemble/tests/test_forest_spmd.py +265 -0
- sklearnex/spmd/linear_model/__init__.py +21 -0
- sklearnex/spmd/linear_model/incremental_linear_model.py +35 -0
- sklearnex/spmd/linear_model/linear_model.py +21 -0
- sklearnex/spmd/linear_model/logistic_regression.py +21 -0
- sklearnex/spmd/linear_model/tests/test_incremental_linear_spmd.py +329 -0
- sklearnex/spmd/linear_model/tests/test_linear_regression_spmd.py +145 -0
- sklearnex/spmd/linear_model/tests/test_logistic_regression_spmd.py +166 -0
- sklearnex/spmd/neighbors/__init__.py +19 -0
- sklearnex/spmd/neighbors/neighbors.py +25 -0
- sklearnex/spmd/neighbors/tests/test_neighbors_spmd.py +288 -0
- sklearnex/svm/__init__.py +29 -0
- sklearnex/svm/_common.py +328 -0
- sklearnex/svm/nusvc.py +332 -0
- sklearnex/svm/nusvr.py +148 -0
- sklearnex/svm/svc.py +360 -0
- sklearnex/svm/svr.py +149 -0
- sklearnex/svm/tests/test_svm.py +93 -0
- sklearnex/tests/_utils.py +328 -0
- sklearnex/tests/_utils_spmd.py +198 -0
- sklearnex/tests/test_common.py +54 -0
- sklearnex/tests/test_config.py +43 -0
- sklearnex/tests/test_memory_usage.py +291 -0
- sklearnex/tests/test_monkeypatch.py +276 -0
- sklearnex/tests/test_n_jobs_support.py +103 -0
- sklearnex/tests/test_parallel.py +48 -0
- sklearnex/tests/test_patching.py +385 -0
- sklearnex/tests/test_run_to_run_stability.py +296 -0
- sklearnex/utils/__init__.py +19 -0
- sklearnex/utils/_array_api.py +82 -0
- sklearnex/utils/parallel.py +59 -0
- sklearnex/utils/tests/test_finite.py +89 -0
- sklearnex/utils/validation.py +17 -0
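The listing above shows the three importable top-level packages shipped in the wheel (daal4py, onedal, sklearnex; see top_level.txt). As a point of reference, a minimal sketch of how the accelerated estimators are typically enabled, assuming the patch_sklearn entry point documented for scikit-learn-intelex and hypothetical sample data:

import numpy as np
from sklearnex import patch_sklearn

# Re-routes supported scikit-learn estimators to the sklearnex implementations
# listed above (see sklearnex/dispatcher.py for the mapping).
patch_sklearn()

from sklearn.cluster import DBSCAN  # now backed by sklearnex.cluster.DBSCAN

X = np.random.default_rng(0).standard_normal((100, 2))
labels = DBSCAN(eps=0.5).fit_predict(X)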
sklearnex/preview/decomposition/tests/test_incremental_pca.py
@@ -0,0 +1,266 @@
# ===============================================================================
# Copyright 2024 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================

import numpy as np
import pytest
from numpy.testing import assert_allclose

from daal4py.sklearn._utils import daal_check_version
from onedal.tests.utils._dataframes_support import (
    _as_numpy,
    _convert_to_dataframe,
    get_dataframes_and_queues,
)
from sklearnex.preview.decomposition import IncrementalPCA


@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
def test_sklearnex_import(dataframe, queue):
    X = [[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]
    X = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
    incpca = IncrementalPCA(n_components=2)
    result = incpca.fit(X)
    assert "sklearnex" in incpca.__module__
    assert hasattr(incpca, "_onedal_estimator")
    assert_allclose(_as_numpy(result.singular_values_), [6.30061232, 0.54980396])


def check_pca_on_gold_data(incpca, dtype, whiten, transformed_data):
    expected_n_samples_seen_ = 6
    expected_n_features_in_ = 2
    expected_n_components_ = 2
    expected_components_ = np.array([[0.83849224, 0.54491354], [-0.54491354, 0.83849224]])
    expected_singular_values_ = np.array([6.30061232, 0.54980396])
    expected_mean_ = np.array([0, 0])
    expected_var_ = np.array([5.6, 2.4])
    expected_explained_variance_ = np.array([7.93954312, 0.06045688])
    expected_explained_variance_ratio_ = np.array([0.99244289, 0.00755711])
    expected_noise_variance_ = 0.0
    expected_transformed_data = (
        np.array(
            [
                [-0.49096647, -1.19399271],
                [-0.78854479, 1.02218579],
                [-1.27951125, -0.17180692],
                [0.49096647, 1.19399271],
                [0.78854479, -1.02218579],
                [1.27951125, 0.17180692],
            ]
        )
        if whiten
        else np.array(
            [
                [-1.38340578, -0.2935787],
                [-2.22189802, 0.25133484],
                [-3.6053038, -0.04224385],
                [1.38340578, 0.2935787],
                [2.22189802, -0.25133484],
                [3.6053038, 0.04224385],
            ]
        )
    )

    tol = 1e-7
    if dtype == np.float32:
        tol = 7e-6 if whiten else 1e-6

    assert incpca.n_samples_seen_ == expected_n_samples_seen_
    assert incpca.n_features_in_ == expected_n_features_in_
    assert incpca.n_components_ == expected_n_components_

    assert_allclose(incpca.singular_values_, expected_singular_values_, atol=tol)
    assert_allclose(incpca.mean_, expected_mean_, atol=tol)
    assert_allclose(incpca.var_, expected_var_, atol=tol)
    assert_allclose(incpca.explained_variance_, expected_explained_variance_, atol=tol)
    assert_allclose(
        incpca.explained_variance_ratio_, expected_explained_variance_ratio_, atol=tol
    )
    assert np.abs(incpca.noise_variance_ - expected_noise_variance_) < tol
    if daal_check_version((2024, "P", 500)):
        assert_allclose(incpca.components_, expected_components_, atol=tol)
        assert_allclose(_as_numpy(transformed_data), expected_transformed_data, atol=tol)
    else:
        for i in range(incpca.n_components_):
            abs_dot_product = np.abs(
                np.dot(incpca.components_[i], expected_components_[i])
            )
            assert np.abs(abs_dot_product - 1.0) < tol

            if np.dot(incpca.components_[i], expected_components_[i]) < 0:
                assert_allclose(
                    _as_numpy(-transformed_data[i]),
                    expected_transformed_data[i],
                    atol=tol,
                )
            else:
                assert_allclose(
                    _as_numpy(transformed_data[i]), expected_transformed_data[i], atol=tol
                )


def check_pca(incpca, dtype, whiten, data, transformed_data):
    tol = 3e-3 if dtype == np.float32 else 2e-6

    n_components = incpca.n_components_

    expected_n_samples_seen = data.shape[0]
    expected_n_features_in = data.shape[1]
    n_samples_seen = incpca.n_samples_seen_
    n_features_in = incpca.n_features_in_
    assert n_samples_seen == expected_n_samples_seen
    assert n_features_in == expected_n_features_in

    components = incpca.components_
    singular_values = incpca.singular_values_
    centered_data = data - np.mean(data, axis=0)
    cov_eigenvalues, cov_eigenvectors = np.linalg.eig(
        centered_data.T @ centered_data / (n_samples_seen - 1)
    )
    cov_eigenvalues = np.nan_to_num(cov_eigenvalues)
    cov_eigenvalues[cov_eigenvalues < 0] = 0
    eigenvalues_order = np.argsort(cov_eigenvalues)[::-1]
    sorted_eigenvalues = cov_eigenvalues[eigenvalues_order]
    sorted_eigenvectors = cov_eigenvectors[:, eigenvalues_order]
    expected_singular_values = np.sqrt(sorted_eigenvalues * (n_samples_seen - 1))[
        :n_components
    ]
    expected_components = sorted_eigenvectors.T[:n_components]

    assert_allclose(singular_values, expected_singular_values, atol=tol)
    for i in range(n_components):
        component_length = np.dot(components[i], components[i])
        assert np.abs(component_length - 1.0) < tol
        abs_dot_product = np.abs(np.dot(components[i], expected_components[i]))
        assert np.abs(abs_dot_product - 1.0) < tol

    expected_mean = np.mean(data, axis=0)
    assert_allclose(incpca.mean_, expected_mean, atol=tol)

    expected_var = np.var(_as_numpy(data), ddof=1, axis=0)
    assert_allclose(incpca.var_, expected_var, atol=tol)

    expected_explained_variance = sorted_eigenvalues[:n_components]
    assert_allclose(incpca.explained_variance_, expected_explained_variance, atol=tol)

    expected_explained_variance_ratio = expected_explained_variance / np.sum(
        sorted_eigenvalues
    )
    assert_allclose(
        incpca.explained_variance_ratio_, expected_explained_variance_ratio, atol=tol
    )

    expected_noise_variance = (
        np.mean(sorted_eigenvalues[n_components:])
        if len(sorted_eigenvalues) > n_components
        else 0.0
    )
    # TODO Fix noise variance computation (It is necessary to update C++ side)
    # assert np.abs(incpca.noise_variance_ - expected_noise_variance) < tol

    expected_transformed_data = centered_data @ components.T
    if whiten:
        scale = np.sqrt(incpca.explained_variance_)
        min_scale = np.finfo(scale.dtype).eps
        scale[scale < min_scale] = np.inf
        expected_transformed_data /= scale

    if not (whiten and n_components == n_samples_seen):
        assert_allclose(_as_numpy(transformed_data), expected_transformed_data, atol=tol)


@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
@pytest.mark.parametrize("whiten", [True, False])
@pytest.mark.parametrize("num_blocks", [1, 2, 3])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_sklearnex_partial_fit_on_gold_data(dataframe, queue, whiten, num_blocks, dtype):

    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    X = X.astype(dtype=dtype)
    X_split = np.array_split(X, num_blocks)
    incpca = IncrementalPCA(whiten=whiten)

    for i in range(num_blocks):
        X_split_df = _convert_to_dataframe(
            X_split[i], sycl_queue=queue, target_df=dataframe
        )
        incpca.partial_fit(X_split_df)

    X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
    transformed_data = incpca.transform(X_df)
    check_pca_on_gold_data(incpca, dtype, whiten, transformed_data)


@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
@pytest.mark.parametrize("whiten", [True, False])
@pytest.mark.parametrize("num_blocks", [1, 2, 3])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_sklearnex_fit_on_gold_data(dataframe, queue, whiten, num_blocks, dtype):

    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    X = X.astype(dtype=dtype)
    incpca = IncrementalPCA(whiten=whiten, batch_size=X.shape[0] // num_blocks)

    X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
    incpca.fit(X_df)
    transformed_data = incpca.transform(X_df)

    check_pca_on_gold_data(incpca, dtype, whiten, transformed_data)


@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
@pytest.mark.parametrize("whiten", [True, False])
@pytest.mark.parametrize("num_blocks", [1, 2, 3])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_sklearnex_fit_transform_on_gold_data(
    dataframe, queue, whiten, num_blocks, dtype
):

    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    X = X.astype(dtype=dtype)
    incpca = IncrementalPCA(whiten=whiten, batch_size=X.shape[0] // num_blocks)

    X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
    transformed_data = incpca.fit_transform(X_df)

    check_pca_on_gold_data(incpca, dtype, whiten, transformed_data)


@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
@pytest.mark.parametrize("n_components", [None, 1, 5])
@pytest.mark.parametrize("whiten", [True, False])
@pytest.mark.parametrize("num_blocks", [1, 10])
@pytest.mark.parametrize("row_count", [100, 1000])
@pytest.mark.parametrize("column_count", [10, 100])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_sklearnex_partial_fit_on_random_data(
    dataframe, queue, n_components, whiten, num_blocks, row_count, column_count, dtype
):
    seed = 81
    gen = np.random.default_rng(seed)
    X = gen.uniform(low=-0.3, high=+0.7, size=(row_count, column_count))
    X = X.astype(dtype=dtype)
    X_split = np.array_split(X, num_blocks)
    incpca = IncrementalPCA(n_components=n_components, whiten=whiten)

    for i in range(num_blocks):
        X_split_df = _convert_to_dataframe(
            X_split[i], sycl_queue=queue, target_df=dataframe
        )
        incpca.partial_fit(X_split_df)

    X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
    transformed_data = incpca.transform(X_df)
    check_pca(incpca, dtype, whiten, X, transformed_data)
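For reference, the gold values asserted by check_pca_on_gold_data follow directly from the 6x2 toy matrix: its column means are zero, so the singular values of the centered data and the eigenvalues of its sample covariance reproduce the expected attributes. A minimal NumPy check (a standalone sketch, not part of the wheel):

import numpy as np

X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]], dtype=np.float64)
Xc = X - X.mean(axis=0)                  # mean_ is [0, 0] for this data
s = np.linalg.svd(Xc, compute_uv=False)  # singular values of the centered data

print(s)                         # ~[6.30061232, 0.54980396]  -> singular_values_
print(s**2 / (len(X) - 1))       # ~[7.93954312, 0.06045688]  -> explained_variance_
print(s**2 / np.sum(s**2))       # ~[0.99244289, 0.00755711]  -> explained_variance_ratio_
print(Xc.var(axis=0, ddof=1))    # ~[5.6, 2.4]                -> var_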
sklearnex/preview/linear_model/__init__.py
@@ -0,0 +1,19 @@
# ===============================================================================
# Copyright 2024 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================

from .ridge import Ridge

__all__ = ["Ridge"]
sklearnex/preview/linear_model/ridge.py
@@ -0,0 +1,419 @@
# ===============================================================================
# Copyright 2024 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================

import logging

from daal4py.sklearn._utils import daal_check_version, sklearn_check_version

if daal_check_version((2024, "P", 600)):
    import numbers

    import numpy as np
    from scipy.sparse import issparse
    from sklearn.linear_model import Ridge as sklearn_Ridge
    from sklearn.metrics import r2_score
    from sklearn.utils.validation import check_is_fitted, check_X_y

    from daal4py.sklearn.linear_model._ridge import _fit_ridge as daal4py_fit_ridge

    if sklearn_check_version("1.0") and not sklearn_check_version("1.2"):
        from sklearn.linear_model._base import _deprecate_normalize
    if sklearn_check_version("1.1") and not sklearn_check_version("1.2"):
        from sklearn.utils import check_scalar

    from onedal.linear_model import Ridge as onedal_Ridge
    from onedal.utils import _num_features, _num_samples

    from ..._device_offload import dispatch, wrap_output_data
    from ..._utils import PatchingConditionsChain

    def _is_numeric_scalar(value):
        """
        Determines if the provided value is a numeric scalar.

        Args:
            value: The value to be checked.

        Returns:
            bool: True if the value is a numeric scalar, False otherwise.
        """
        return isinstance(value, numbers.Real)

    class Ridge(sklearn_Ridge):
        __doc__ = sklearn_Ridge.__doc__

        if sklearn_check_version("1.2"):
            _parameter_constraints: dict = {**sklearn_Ridge._parameter_constraints}

            def __init__(
                self,
                alpha=1.0,
                fit_intercept=True,
                copy_X=True,
                max_iter=None,
                tol=1e-4,
                solver="auto",
                positive=False,
                random_state=None,
            ):
                super().__init__(
                    alpha=alpha,
                    fit_intercept=fit_intercept,
                    copy_X=copy_X,
                    max_iter=max_iter,
                    tol=tol,
                    solver=solver,
                    positive=positive,
                    random_state=random_state,
                )

        elif sklearn_check_version("1.0"):

            def __init__(
                self,
                alpha=1.0,
                fit_intercept=True,
                normalize="deprecated",
                copy_X=True,
                max_iter=None,
                tol=1e-3,
                solver="auto",
                positive=False,
                random_state=None,
            ):
                super().__init__(
                    alpha=alpha,
                    fit_intercept=fit_intercept,
                    normalize=normalize,
                    copy_X=copy_X,
                    max_iter=max_iter,
                    solver=solver,
                    tol=tol,
                    positive=positive,
                    random_state=random_state,
                )

        else:

            def __init__(
                self,
                alpha=1.0,
                fit_intercept=True,
                normalize=False,
                copy_X=True,
                max_iter=None,
                tol=1e-3,
                solver="auto",
                random_state=None,
            ):
                super().__init__(
                    alpha=alpha,
                    fit_intercept=fit_intercept,
                    normalize=normalize,
                    copy_X=copy_X,
                    max_iter=max_iter,
                    tol=tol,
                    solver=solver,
                    random_state=random_state,
                )

        def fit(self, X, y, sample_weight=None):
            # It is necessary to properly update coefs for predict if we
            # fallback to sklearn in dispatch
            if hasattr(self, "_onedal_estimator"):
                del self._onedal_estimator

            dispatch(
                self,
                "fit",
                {
                    "onedal": self.__class__._onedal_fit,
                    "sklearn": sklearn_Ridge.fit,
                },
                X,
                y,
                sample_weight,
            )
            return self

        @wrap_output_data
        def predict(self, X):
            check_is_fitted(self)

            return dispatch(
                self,
                "predict",
                {
                    "onedal": self.__class__._onedal_predict,
                    "sklearn": sklearn_Ridge.predict,
                },
                X,
            )

        @wrap_output_data
        def score(self, X, y, sample_weight=None):
            check_is_fitted(self)

            return dispatch(
                self,
                "score",
                {
                    "onedal": self.__class__._onedal_score,
                    "sklearn": sklearn_Ridge.score,
                },
                X,
                y,
                sample_weight=sample_weight,
            )

        def _onedal_fit_supported(self, patching_status, method_name, *data):
            assert method_name == "fit"
            assert len(data) == 3
            X, y, sample_weight = data

            normalize_is_set = (
                hasattr(self, "normalize")
                and self.normalize
                and self.normalize != "deprecated"
            )
            positive_is_set = hasattr(self, "positive") and self.positive

            n_samples = _num_samples(X)
            n_features = _num_features(X, fallback_1d=True)

            # Check if equations are well defined
            is_underdetermined = n_samples < (n_features + int(self.fit_intercept))

            patching_status.and_conditions(
                [
                    (
                        self.solver == "auto",
                        f"'{self.solver}' solver is not supported. "
                        "Only 'auto' solver is supported.",
                    ),
                    (
                        not issparse(X) and not issparse(y),
                        "Sparse input is not supported.",
                    ),
                    (
                        not is_underdetermined,
                        "The shape of X (fitting) does not satisfy oneDAL requirements:"
                        "Number of features + 1 >= number of samples.",
                    ),
                    (sample_weight is None, "Sample weight is not supported."),
                    (not normalize_is_set, "Normalization is not supported."),
                    (
                        not positive_is_set,
                        "Forced positive coefficients are not supported.",
                    ),
                ]
            )

            return patching_status

        def _onedal_predict_supported(self, patching_status, method_name, *data):
            assert method_name in ["predict", "score"]
            assert len(data) <= 2

            n_samples = _num_samples(data[0])
            model_is_sparse = issparse(self.coef_) or (
                self.fit_intercept and issparse(self.intercept_)
            )
            patching_status.and_conditions(
                [
                    (
                        self.solver == "auto",
                        f"'{self.solver}' solver is not supported. "
                        "Only 'auto' solver is supported.",
                    ),
                    (n_samples > 0, "Number of samples is less than 1."),
                    (not issparse(data[0]), "Sparse input is not supported."),
                    (not model_is_sparse, "Sparse coefficients are not supported."),
                ]
            )

            return patching_status

        def _onedal_gpu_supported(self, method_name, *data):
            patching_status = PatchingConditionsChain(
                f"sklearn.linear_model.{self.__class__.__name__}.fit"
            )

            if method_name == "fit":
                patching_status.and_condition(
                    _is_numeric_scalar(self.alpha),
                    "Non-scalar alpha is not supported for GPU.",
                )

                return self._onedal_fit_supported(patching_status, method_name, *data)

            if method_name in ["predict", "score"]:
                return self._onedal_predict_supported(patching_status, method_name, *data)

            raise RuntimeError(
                f"Unknown method {method_name} in {self.__class__.__name__}"
            )

        def _onedal_cpu_supported(self, method_name, *data):
            patching_status = PatchingConditionsChain(
                f"sklearn.linear_model.{self.__class__.__name__}.fit"
            )

            if method_name == "fit":
                return self._onedal_fit_supported(patching_status, method_name, *data)

            if method_name in ["predict", "score"]:
                return self._onedal_predict_supported(patching_status, method_name, *data)

            raise RuntimeError(
                f"Unknown method {method_name} in {self.__class__.__name__}"
            )

        def _initialize_onedal_estimator(self):
            onedal_params = {
                "fit_intercept": self.fit_intercept,
                "alpha": self.alpha,
                "copy_X": self.copy_X,
            }
            self._onedal_estimator = onedal_Ridge(**onedal_params)

        def _daal_fit(self, X, y, sample_weight=None):
            daal4py_fit_ridge(self, X, y, sample_weight)
            self._onedal_estimator.n_features_in_ = _num_features(X, fallback_1d=True)
            self._onedal_estimator.coef_ = self.coef_
            self._onedal_estimator.intercept_ = self.intercept_

        def _onedal_fit(self, X, y, sample_weight, queue=None):
            # `Sample weight` is not supported. Expected to be None value.
            assert sample_weight is None

            if sklearn_check_version("1.2"):
                self._validate_params()
            elif sklearn_check_version("1.1"):
                if self.max_iter is not None:
                    self.max_iter = check_scalar(
                        self.max_iter, "max_iter", target_type=numbers.Integral, min_val=1
                    )
                self.tol = check_scalar(
                    self.tol, "tol", target_type=numbers.Real, min_val=0.0
                )
                if self.alpha is not None and not isinstance(
                    self.alpha, (np.ndarray, tuple)
                ):
                    self.alpha = check_scalar(
                        self.alpha,
                        "alpha",
                        target_type=numbers.Real,
                        min_val=0.0,
                        include_boundaries="left",
                    )

            check_params = {
                "X": X,
                "y": y,
                "dtype": [np.float64, np.float32],
                "accept_sparse": ["csr", "csc", "coo"],
                "y_numeric": True,
                "multi_output": True,
            }
            if sklearn_check_version("1.0"):
                X, y = self._validate_data(**check_params)
            else:
                X, y = check_X_y(**check_params)

            if sklearn_check_version("1.0") and not sklearn_check_version("1.2"):
                self._normalize = _deprecate_normalize(
                    self.normalize,
                    default=False,
                    estimator_name=self.__class__.__name__,
                )

            self._initialize_onedal_estimator()

            # Falling back to daal4py if the device is CPU and alpha is array-like
            # since onedal does not yet support non-scalars for alpha, thus
            # should only be used for GPU/CPU with scalar alpha to not limit the functionality
            cpu_device = queue is None or queue.sycl_device.is_cpu
            if cpu_device and not _is_numeric_scalar(self.alpha):
                self._daal_fit(X, y)
            else:
                self._onedal_estimator.fit(X, y, queue=queue)

            self._save_attributes()

        def _onedal_predict(self, X, queue=None):
            if sklearn_check_version("1.0"):
                X = self._validate_data(X, accept_sparse=False, reset=False)

            if not hasattr(self, "_onedal_estimator"):
                self._initialize_onedal_estimator()
                self._onedal_estimator.coef_ = self.coef_
                self._onedal_estimator.intercept_ = self.intercept_

            res = self._onedal_estimator.predict(X, queue=queue)
            return res

        def _onedal_score(self, X, y, sample_weight=None, queue=None):
            return r2_score(
                y, self._onedal_predict(X, queue=queue), sample_weight=sample_weight
            )

        @property
        def coef_(self):
            return self._coef

        @coef_.setter
        def coef_(self, value):
            if hasattr(self, "_onedal_estimator"):
                self._onedal_estimator.coef_ = value
                # checking if the model is already fitted and if so, deleting the model
                if hasattr(self._onedal_estimator, "_onedal_model"):
                    del self._onedal_estimator._onedal_model
            self._coef = value

        @property
        def intercept_(self):
            return self._intercept

        @intercept_.setter
        def intercept_(self, value):
            if hasattr(self, "_onedal_estimator"):
                self._onedal_estimator.intercept_ = value
                # checking if the model is already fitted and if so, deleting the model
                if hasattr(self._onedal_estimator, "_onedal_model"):
                    del self._onedal_estimator._onedal_model
            self._intercept = value

        def _save_attributes(self):
            self.n_features_in_ = self._onedal_estimator.n_features_in_
            self._coef = self._onedal_estimator.coef_
            self._intercept = self._onedal_estimator.intercept_

        fit.__doc__ = sklearn_Ridge.fit.__doc__
        predict.__doc__ = sklearn_Ridge.predict.__doc__
        score.__doc__ = sklearn_Ridge.score.__doc__

else:
    from daal4py.sklearn.linear_model._ridge import Ridge
    from onedal._device_offload import support_input_format

    Ridge.fit = support_input_format(queue_param=False)(Ridge.fit)
    Ridge.predict = support_input_format(queue_param=False)(Ridge.predict)
    Ridge.score = support_input_format(queue_param=False)(Ridge.score)

    logging.warning(
        "Preview Ridge requires oneDAL version >= 2024.6 but it was not found"
    )
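To illustrate how the preview Ridge above is exercised, a minimal sketch with hypothetical data, assuming this wheel is installed with oneDAL >= 2024.6 so the first branch of the module is taken: with solver="auto", dense float input, sample_weight=None and a scalar alpha, the patching conditions pass and fit/predict/score dispatch to the oneDAL backend; otherwise dispatch falls back to stock scikit-learn, or to daal4py for array-like alpha on CPU.

import numpy as np
from sklearnex.preview.linear_model import Ridge  # exported via the __init__.py above

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 5))
y = X @ np.array([1.0, -2.0, 0.5, 0.0, 3.0]) + 0.1 * rng.standard_normal(200)

# Dense data, solver="auto", no sample_weight and scalar alpha satisfy the
# _onedal_fit_supported conditions, so this runs through the oneDAL estimator.
model = Ridge(alpha=0.5).fit(X, y)
print(model.coef_, model.intercept_)
print(model.score(X, y))  # R^2 computed via _onedal_score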