scikit_learn_intelex-2024.5.0-py311-none-manylinux1_x86_64.whl → scikit_learn_intelex-2024.7.0-py311-none-manylinux1_x86_64.whl
- {scikit_learn_intelex-2024.5.0.dist-info → scikit_learn_intelex-2024.7.0.dist-info}/METADATA +2 -2
- scikit_learn_intelex-2024.7.0.dist-info/RECORD +122 -0
- sklearnex/_config.py +3 -15
- sklearnex/_device_offload.py +9 -168
- sklearnex/basic_statistics/basic_statistics.py +127 -1
- sklearnex/basic_statistics/tests/test_basic_statistics.py +251 -0
- sklearnex/basic_statistics/tests/test_incremental_basic_statistics.py +1 -1
- sklearnex/cluster/dbscan.py +3 -1
- sklearnex/cluster/k_means.py +8 -0
- sklearnex/cluster/tests/test_dbscan.py +8 -6
- sklearnex/cluster/tests/test_kmeans.py +15 -3
- sklearnex/conftest.py +11 -1
- sklearnex/covariance/incremental_covariance.py +64 -13
- sklearnex/covariance/tests/test_incremental_covariance.py +35 -0
- sklearnex/decomposition/pca.py +25 -1
- sklearnex/decomposition/tests/test_pca.py +4 -2
- sklearnex/dispatcher.py +109 -1
- sklearnex/ensemble/_forest.py +121 -57
- sklearnex/ensemble/tests/test_forest.py +7 -0
- sklearnex/glob/dispatcher.py +16 -2
- sklearnex/linear_model/coordinate_descent.py +13 -0
- sklearnex/linear_model/incremental_linear.py +102 -25
- sklearnex/linear_model/linear.py +25 -39
- sklearnex/linear_model/logistic_regression.py +92 -74
- sklearnex/linear_model/ridge.py +7 -0
- sklearnex/linear_model/tests/test_incremental_linear.py +10 -10
- sklearnex/linear_model/tests/test_linear.py +30 -5
- sklearnex/linear_model/tests/test_logreg.py +45 -3
- sklearnex/manifold/t_sne.py +4 -0
- sklearnex/metrics/pairwise.py +5 -0
- sklearnex/metrics/ranking.py +3 -0
- sklearnex/model_selection/split.py +3 -0
- sklearnex/neighbors/_lof.py +9 -0
- sklearnex/neighbors/common.py +45 -1
- sklearnex/neighbors/knn_classification.py +1 -20
- sklearnex/neighbors/knn_regression.py +25 -20
- sklearnex/neighbors/knn_unsupervised.py +31 -7
- sklearnex/preview/__init__.py +1 -1
- sklearnex/preview/decomposition/__init__.py +19 -0
- sklearnex/preview/decomposition/incremental_pca.py +228 -0
- sklearnex/preview/decomposition/tests/test_incremental_pca.py +266 -0
- sklearnex/preview/linear_model/__init__.py +19 -0
- sklearnex/preview/linear_model/ridge.py +419 -0
- sklearnex/preview/linear_model/tests/test_ridge.py +102 -0
- sklearnex/spmd/basic_statistics/tests/test_basic_statistics_spmd.py +107 -0
- sklearnex/spmd/cluster/tests/test_dbscan_spmd.py +97 -0
- sklearnex/spmd/cluster/tests/test_kmeans_spmd.py +172 -0
- sklearnex/spmd/covariance/tests/test_covariance_spmd.py +107 -0
- sklearnex/spmd/decomposition/tests/test_pca_spmd.py +128 -0
- sklearnex/spmd/ensemble/tests/test_forest_spmd.py +265 -0
- sklearnex/spmd/linear_model/tests/test_linear_regression_spmd.py +145 -0
- sklearnex/spmd/linear_model/tests/test_logistic_regression_spmd.py +163 -0
- sklearnex/spmd/neighbors/tests/test_neighbors_spmd.py +288 -0
- sklearnex/svm/_common.py +163 -20
- sklearnex/svm/nusvc.py +40 -4
- sklearnex/svm/nusvr.py +31 -2
- sklearnex/svm/svc.py +40 -4
- sklearnex/svm/svr.py +31 -2
- sklearnex/svm/tests/test_svm.py +12 -20
- sklearnex/tests/_utils.py +185 -30
- sklearnex/tests/_utils_spmd.py +185 -0
- sklearnex/tests/test_common.py +54 -0
- sklearnex/tests/test_config.py +4 -0
- sklearnex/tests/test_memory_usage.py +185 -126
- sklearnex/tests/test_monkeypatch.py +12 -4
- sklearnex/tests/test_patching.py +21 -25
- sklearnex/tests/test_run_to_run_stability.py +295 -0
- sklearnex/utils/_namespace.py +1 -1
- scikit_learn_intelex-2024.5.0.dist-info/RECORD +0 -104
- sklearnex/tests/test_run_to_run_stability_tests.py +0 -428
- {scikit_learn_intelex-2024.5.0.dist-info → scikit_learn_intelex-2024.7.0.dist-info}/LICENSE.txt +0 -0
- {scikit_learn_intelex-2024.5.0.dist-info → scikit_learn_intelex-2024.7.0.dist-info}/WHEEL +0 -0
- {scikit_learn_intelex-2024.5.0.dist-info → scikit_learn_intelex-2024.7.0.dist-info}/top_level.txt +0 -0
sklearnex/spmd/cluster/tests/test_dbscan_spmd.py
@@ -0,0 +1,97 @@
+# ==============================================================================
+# Copyright 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+import numpy as np
+import pytest
+
+from onedal.tests.utils._dataframes_support import (
+    _convert_to_dataframe,
+    get_dataframes_and_queues,
+)
+from sklearnex.tests._utils_spmd import (
+    _generate_clustering_data,
+    _get_local_tensor,
+    _mpi_libs_and_gpu_available,
+    _spmd_assert_allclose,
+)
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.mpi
+def test_dbscan_spmd_gold(dataframe, queue):
+    # Import spmd and batch algo
+    from sklearnex.cluster import DBSCAN as DBSCAN_Batch
+    from sklearnex.spmd.cluster import DBSCAN as DBSCAN_SPMD
+
+    data = np.array([[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]])
+
+    local_dpt_data = _convert_to_dataframe(
+        _get_local_tensor(data), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Ensure labels from fit of batch algo matches spmd
+    spmd_model = DBSCAN_SPMD(eps=3, min_samples=2).fit(local_dpt_data)
+    batch_model = DBSCAN_Batch(eps=3, min_samples=2).fit(data)
+
+    _spmd_assert_allclose(spmd_model.labels_, batch_model.labels_)
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize("n_samples", [200, 10000])
+@pytest.mark.parametrize("n_features_and_eps", [(5, 3), (5, 10), (25, 10)])
+@pytest.mark.parametrize("centers", [10, None])
+@pytest.mark.parametrize("min_samples", [2, 5, 15])
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+@pytest.mark.mpi
+def test_dbscan_spmd_synthetic(
+    n_samples, n_features_and_eps, centers, min_samples, dataframe, queue, dtype
+):
+    n_features, eps = n_features_and_eps
+    # Import spmd and batch algo
+    from sklearnex.cluster import DBSCAN as DBSCAN_Batch
+    from sklearnex.spmd.cluster import DBSCAN as DBSCAN_SPMD
+
+    data, _ = _generate_clustering_data(
+        n_samples, n_features, centers=centers, dtype=dtype
+    )
+
+    local_dpt_data = _convert_to_dataframe(
+        _get_local_tensor(data), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Ensure labels from fit of batch algo matches spmd
+    spmd_model = DBSCAN_SPMD(eps=eps, min_samples=min_samples).fit(local_dpt_data)
+    batch_model = DBSCAN_Batch(eps=eps, min_samples=min_samples).fit(data)
+
+    _spmd_assert_allclose(spmd_model.labels_, batch_model.labels_)
+
+    # Ensure meaningful test setup
+    if np.all(batch_model.labels_ == -1):
+        raise ValueError("No labels given - try raising epsilon")
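The helpers imported from sklearnex/tests/_utils_spmd.py (added in this release, +185 lines) are not shown in this diff. As a rough orientation only, the sketch below shows one way _get_local_tensor could hand each MPI rank its own row block of the replicated array; the name get_local_block and the even np.array_split policy are assumptions, not the library's actual implementation.

import numpy as np
from mpi4py import MPI


def get_local_block(full_data):
    """Return this rank's contiguous block of rows from the replicated array."""
    comm = MPI.COMM_WORLD
    rank, size = comm.Get_rank(), comm.Get_size()
    # Every rank holds the same full array; each keeps only its own slice so the
    # SPMD estimator sees a disjoint partition of the global data.
    return np.array_split(full_data, size)[rank]

Under this assumption, concatenating the blocks from all ranks reproduces the original array, which is why the per-rank SPMD fit above can be compared against the single-process batch fit.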
sklearnex/spmd/cluster/tests/test_kmeans_spmd.py
@@ -0,0 +1,172 @@
+# ==============================================================================
+# Copyright 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+
+from onedal.tests.utils._dataframes_support import (
+    _convert_to_dataframe,
+    get_dataframes_and_queues,
+)
+from sklearnex.tests._utils_spmd import (
+    _assert_kmeans_labels_allclose,
+    _assert_unordered_allclose,
+    _generate_clustering_data,
+    _get_local_tensor,
+    _mpi_libs_and_gpu_available,
+)
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.mpi
+def test_kmeans_spmd_gold(dataframe, queue):
+    # Import spmd and batch algo
+    from sklearnex.cluster import KMeans as KMeans_Batch
+    from sklearnex.spmd.cluster import KMeans as KMeans_SPMD
+
+    X_train = np.array(
+        [
+            [1, 2],
+            [2, 2],
+            [2, 3],
+            [8, 7],
+            [8, 8],
+            [25, 80],
+            [5, 65],
+            [2, 8],
+            [1, 3],
+            [2, 2],
+            [1, 3],
+            [2, 2],
+        ]
+    )
+    X_test = np.array([[0, 0], [12, 3], [2, 2], [7, 8]])
+
+    local_dpt_X_train = _convert_to_dataframe(
+        _get_local_tensor(X_train), sycl_queue=queue, target_df=dataframe
+    )
+    local_dpt_X_test = _convert_to_dataframe(
+        _get_local_tensor(X_test), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Ensure labels from fit of batch algo matches spmd
+    spmd_model = KMeans_SPMD(n_clusters=2, random_state=0).fit(local_dpt_X_train)
+    batch_model = KMeans_Batch(n_clusters=2, random_state=0).fit(X_train)
+
+    _assert_unordered_allclose(spmd_model.cluster_centers_, batch_model.cluster_centers_)
+    _assert_kmeans_labels_allclose(
+        spmd_model.labels_,
+        batch_model.labels_,
+        spmd_model.cluster_centers_,
+        batch_model.cluster_centers_,
+    )
+    assert_allclose(spmd_model.n_iter_, batch_model.n_iter_, atol=1)
+
+    # Ensure predictions of batch algo match spmd
+    spmd_result = spmd_model.predict(local_dpt_X_test)
+    batch_result = batch_model.predict(X_test)
+
+    _assert_kmeans_labels_allclose(
+        spmd_result,
+        batch_result,
+        spmd_model.cluster_centers_,
+        batch_model.cluster_centers_,
+    )
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize("n_samples", [200, 10000])
+@pytest.mark.parametrize("n_features", [5, 25])
+@pytest.mark.parametrize("n_clusters", [2, 5, 15])
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+@pytest.mark.mpi
+def test_kmeans_spmd_synthetic(
+    n_samples, n_features, n_clusters, dataframe, queue, dtype
+):
+    # Import spmd and batch algo
+    from sklearnex.cluster import KMeans as KMeans_Batch
+    from sklearnex.spmd.cluster import KMeans as KMeans_SPMD
+
+    # TODO: investigate issues when centers != n_clusters (spmd and batch results don't match for all values of K)
+    X_train, X_test = _generate_clustering_data(
+        n_samples, n_features, centers=n_clusters, dtype=dtype
+    )
+
+    local_dpt_X_train = _convert_to_dataframe(
+        _get_local_tensor(X_train), sycl_queue=queue, target_df=dataframe
+    )
+    local_dpt_X_test = _convert_to_dataframe(
+        _get_local_tensor(X_test), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Validate KMeans init
+    spmd_model_init = KMeans_SPMD(n_clusters=n_clusters, max_iter=1, random_state=0).fit(
+        local_dpt_X_train
+    )
+    batch_model_init = KMeans_Batch(
+        n_clusters=n_clusters, max_iter=1, random_state=0
+    ).fit(X_train)
+    # TODO: centers do not match up after init
+    # _assert_unordered_allclose(spmd_model_init.cluster_centers_, batch_model_init.cluster_centers_)
+
+    # Ensure labels from fit of batch algo matches spmd, using same init
+    spmd_model = KMeans_SPMD(
+        n_clusters=n_clusters, init=spmd_model_init.cluster_centers_, random_state=0
+    ).fit(local_dpt_X_train)
+    batch_model = KMeans_Batch(
+        n_clusters=n_clusters, init=spmd_model_init.cluster_centers_, random_state=0
+    ).fit(X_train)
+
+    atol = 1e-5 if dtype == np.float32 else 1e-7
+    _assert_unordered_allclose(
+        spmd_model.cluster_centers_, batch_model.cluster_centers_, atol=atol
+    )
+    _assert_kmeans_labels_allclose(
+        spmd_model.labels_,
+        batch_model.labels_,
+        spmd_model.cluster_centers_,
+        batch_model.cluster_centers_,
+        atol=atol,
+    )
+    # TODO: KMeans iterations are not aligned
+    # assert_allclose(spmd_model.n_iter_, batch_model.n_iter_, atol=1)
+
+    # Ensure predictions of batch algo match spmd
+    spmd_result = spmd_model.predict(local_dpt_X_test)
+    batch_result = batch_model.predict(X_test)
+
+    _assert_kmeans_labels_allclose(
+        spmd_result,
+        batch_result,
+        spmd_model.cluster_centers_,
+        batch_model.cluster_centers_,
+        atol=atol,
+    )
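_assert_unordered_allclose is used above because k-means may number equivalent clusters differently in the SPMD and batch runs, so the centers cannot be compared row by row. Below is a minimal sketch of such an order-insensitive comparison, assuming a lexicographic row sort is enough to align the centers; the real helper in sklearnex/tests/_utils_spmd.py may pair rows differently (for example by nearest-neighbor matching).

import numpy as np
from numpy.testing import assert_allclose


def assert_unordered_allclose(actual, desired, atol=0):
    """Compare two sets of cluster centers while ignoring row order."""
    actual, desired = np.asarray(actual), np.asarray(desired)
    # Sort rows lexicographically (first column as primary key) so that matching
    # centers land at the same index before the element-wise comparison.
    assert_allclose(
        actual[np.lexsort(actual.T[::-1])],
        desired[np.lexsort(desired.T[::-1])],
        atol=atol,
    )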
sklearnex/spmd/covariance/tests/test_covariance_spmd.py
@@ -0,0 +1,107 @@
+# ==============================================================================
+# Copyright 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+
+from onedal.tests.utils._dataframes_support import (
+    _convert_to_dataframe,
+    get_dataframes_and_queues,
+)
+from sklearnex.tests._utils_spmd import (
+    _generate_statistic_data,
+    _get_local_tensor,
+    _mpi_libs_and_gpu_available,
+)
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.mpi
+def test_covariance_spmd_gold(dataframe, queue):
+    # Import spmd and batch algo
+    from onedal.covariance import EmpiricalCovariance as EmpiricalCovariance_Batch
+    from sklearnex.spmd.covariance import EmpiricalCovariance as EmpiricalCovariance_SPMD
+
+    # Create gold data and convert to dataframe
+    data = np.array(
+        [
+            [0.0, 0.0, 0.0],
+            [0.0, 1.0, 2.0],
+            [0.0, 2.0, 4.0],
+            [0.0, 3.0, 8.0],
+            [0.0, 4.0, 16.0],
+            [0.0, 5.0, 32.0],
+            [0.0, 6.0, 64.0],
+        ]
+    )
+
+    local_dpt_data = _convert_to_dataframe(
+        _get_local_tensor(data), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Ensure results of batch algo match spmd
+    spmd_result = EmpiricalCovariance_SPMD().fit(local_dpt_data)
+    batch_result = EmpiricalCovariance_Batch().fit(data)
+
+    assert_allclose(spmd_result.covariance_, batch_result.covariance_)
+    assert_allclose(spmd_result.location_, batch_result.location_)
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize("n_samples", [100, 10000])
+@pytest.mark.parametrize("n_features", [10, 100])
+@pytest.mark.parametrize("assume_centered", [True, False])
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+@pytest.mark.mpi
+def test_covariance_spmd_synthetic(
+    n_samples, n_features, assume_centered, dataframe, queue, dtype
+):
+    # Import spmd and batch algo
+    # TODO: Align sklearnex spmd to sklearnex estimator with bias and swap onedal with sklearnex
+    from onedal.covariance import EmpiricalCovariance as EmpiricalCovariance_Batch
+    from sklearnex.spmd.covariance import EmpiricalCovariance as EmpiricalCovariance_SPMD
+
+    # Generate data and convert to dataframe
+    data = _generate_statistic_data(n_samples, n_features, dtype=dtype)
+
+    local_dpt_data = _convert_to_dataframe(
+        _get_local_tensor(data), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Ensure results of batch algo match spmd
+    spmd_result = EmpiricalCovariance_SPMD(assume_centered=assume_centered).fit(
+        local_dpt_data
+    )
+    batch_result = EmpiricalCovariance_Batch(assume_centered=assume_centered).fit(data)
+
+    atol = 1e-5 if dtype == np.float32 else 1e-7
+    assert_allclose(spmd_result.covariance_, batch_result.covariance_, atol=atol)
+    assert_allclose(spmd_result.location_, batch_result.location_, atol=atol)
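For reference, the quantity both estimators are expected to agree on is the empirical covariance of the rows. A short NumPy check of the gold data is sketched below; whether oneDAL applies the biased (divide by n) or unbiased (divide by n-1) normalization is not visible in this diff (the TODO above hints that bias handling still differs between the onedal and sklearnex estimators), so the bias=True choice here is an assumption made for illustration.

import numpy as np

# Gold data from test_covariance_spmd_gold above.
data = np.array(
    [
        [0.0, 0.0, 0.0],
        [0.0, 1.0, 2.0],
        [0.0, 2.0, 4.0],
        [0.0, 3.0, 8.0],
        [0.0, 4.0, 16.0],
        [0.0, 5.0, 32.0],
        [0.0, 6.0, 64.0],
    ]
)
location = data.mean(axis=0)                         # per-feature mean (location_)
covariance = np.cov(data, rowvar=False, bias=True)   # ML-normalized covariance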
sklearnex/spmd/decomposition/tests/test_pca_spmd.py
@@ -0,0 +1,128 @@
+# ==============================================================================
+# Copyright 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+
+from onedal.tests.utils._dataframes_support import (
+    _convert_to_dataframe,
+    get_dataframes_and_queues,
+)
+from sklearnex.tests._utils_spmd import (
+    _generate_statistic_data,
+    _get_local_tensor,
+    _mpi_libs_and_gpu_available,
+)
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.mpi
+def test_pca_spmd_gold(dataframe, queue):
+    # Import spmd and batch algo
+    from sklearnex.decomposition import PCA as PCA_Batch
+    from sklearnex.spmd.decomposition import PCA as PCA_SPMD
+
+    # Create gold data and convert to dataframe
+    data = np.array(
+        [
+            [0.0, 0.0, 0.0],
+            [0.0, 1.0, 2.0],
+            [0.0, 2.0, 4.0],
+            [0.0, 3.0, 8.0],
+            [0.0, 4.0, 16.0],
+            [0.0, 5.0, 32.0],
+            [0.0, 6.0, 64.0],
+            [0.0, 7.0, 128.0],
+        ]
+    )
+
+    local_dpt_data = _convert_to_dataframe(
+        _get_local_tensor(data), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Ensure results of batch algo match spmd
+    spmd_result = PCA_SPMD(n_components=2).fit(local_dpt_data)
+    batch_result = PCA_Batch(n_components=2).fit(data)
+
+    assert_allclose(spmd_result.mean_, batch_result.mean_)
+    assert_allclose(spmd_result.components_, batch_result.components_)
+    assert_allclose(spmd_result.singular_values_, batch_result.singular_values_)
+    assert_allclose(
+        spmd_result.noise_variance_,
+        batch_result.noise_variance_,
+        atol=1e-7,
+    )
+    assert_allclose(
+        spmd_result.explained_variance_ratio_, batch_result.explained_variance_ratio_
+    )
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize("n_samples", [100, 10000])
+@pytest.mark.parametrize("n_features", [10, 100])
+@pytest.mark.parametrize("n_components", [0.5, 3, "mle", None])
+@pytest.mark.parametrize("whiten", [True, False])
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+@pytest.mark.mpi
+def test_pca_spmd_synthetic(
+    n_samples, n_features, n_components, whiten, dataframe, queue, dtype
+):
+    # TODO: Resolve issues with batch fallback and lack of support for n_rows_rank < n_cols
+    if n_components == "mle" or n_components == 3:
+        pytest.skip("Avoid error in case of batch fallback to sklearn")
+    if n_samples <= n_features:
+        pytest.skip("Avoid n_samples < n_features error from spmd data split")
+
+    # Import spmd and batch algo
+    from sklearnex.decomposition import PCA as PCA_Batch
+    from sklearnex.spmd.decomposition import PCA as PCA_SPMD
+
+    # Generate data and convert to dataframe
+    data = _generate_statistic_data(n_samples, n_features, dtype=dtype)
+
+    local_dpt_data = _convert_to_dataframe(
+        _get_local_tensor(data), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Ensure results of batch algo match spmd
+    spmd_result = PCA_SPMD(n_components=n_components, whiten=whiten).fit(local_dpt_data)
+    batch_result = PCA_Batch(n_components=n_components, whiten=whiten).fit(data)
+
+    tol = 1e-3 if dtype == np.float32 else 1e-7
+    assert_allclose(spmd_result.mean_, batch_result.mean_, atol=tol)
+    assert_allclose(spmd_result.components_, batch_result.components_, atol=tol, rtol=tol)
+    assert_allclose(spmd_result.singular_values_, batch_result.singular_values_, atol=tol)
+    assert_allclose(spmd_result.noise_variance_, batch_result.noise_variance_, atol=tol)
+    assert_allclose(
+        spmd_result.explained_variance_ratio_,
+        batch_result.explained_variance_ratio_,
+        atol=tol,
+    )
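The synthetic inputs for the covariance and PCA suites come from _generate_statistic_data, whose body is not part of this diff. A plausible stand-in is sketched below, assuming a fixed-seed standard-normal draw so that every MPI rank generates the same global matrix before slicing off its local block; the distribution and seed used by the real helper are unknown, so treat this as illustrative only.

import numpy as np


def generate_statistic_data(n_samples, n_features, dtype=np.float64, seed=42):
    """Hypothetical stand-in: dense i.i.d. standard-normal data with a fixed seed."""
    rng = np.random.default_rng(seed)
    return rng.standard_normal(size=(n_samples, n_features)).astype(dtype)

All four suites are gated on _mpi_libs_and_gpu_available and tagged with @pytest.mark.mpi, so they are presumably executed under an MPI launcher (for example, something along the lines of mpirun -n 2 pytest -m mpi) with a SYCL GPU queue available on every rank.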