scikit-learn-intelex 2024.6.0__py310-none-manylinux1_x86_64.whl → 2024.7.0__py310-none-manylinux1_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of scikit-learn-intelex might be problematic.
- {scikit_learn_intelex-2024.6.0.dist-info → scikit_learn_intelex-2024.7.0.dist-info}/METADATA +2 -2
- {scikit_learn_intelex-2024.6.0.dist-info → scikit_learn_intelex-2024.7.0.dist-info}/RECORD +55 -41
- sklearnex/_config.py +3 -15
- sklearnex/_device_offload.py +9 -168
- sklearnex/basic_statistics/basic_statistics.py +127 -1
- sklearnex/basic_statistics/tests/test_basic_statistics.py +251 -0
- sklearnex/basic_statistics/tests/test_incremental_basic_statistics.py +1 -1
- sklearnex/cluster/dbscan.py +0 -1
- sklearnex/cluster/k_means.py +8 -0
- sklearnex/cluster/tests/test_kmeans.py +15 -3
- sklearnex/covariance/incremental_covariance.py +64 -13
- sklearnex/covariance/tests/test_incremental_covariance.py +35 -0
- sklearnex/decomposition/pca.py +25 -1
- sklearnex/dispatcher.py +94 -0
- sklearnex/ensemble/_forest.py +8 -35
- sklearnex/ensemble/tests/test_forest.py +9 -12
- sklearnex/linear_model/coordinate_descent.py +13 -0
- sklearnex/linear_model/linear.py +2 -34
- sklearnex/linear_model/logistic_regression.py +79 -59
- sklearnex/linear_model/ridge.py +7 -0
- sklearnex/linear_model/tests/test_linear.py +28 -3
- sklearnex/linear_model/tests/test_logreg.py +45 -3
- sklearnex/manifold/t_sne.py +4 -0
- sklearnex/metrics/pairwise.py +5 -0
- sklearnex/metrics/ranking.py +3 -0
- sklearnex/model_selection/split.py +3 -0
- sklearnex/neighbors/_lof.py +9 -0
- sklearnex/neighbors/common.py +45 -1
- sklearnex/neighbors/knn_classification.py +1 -20
- sklearnex/neighbors/knn_regression.py +1 -20
- sklearnex/neighbors/knn_unsupervised.py +31 -7
- sklearnex/preview/__init__.py +1 -1
- sklearnex/preview/linear_model/__init__.py +19 -0
- sklearnex/preview/linear_model/ridge.py +419 -0
- sklearnex/preview/linear_model/tests/test_ridge.py +102 -0
- sklearnex/spmd/basic_statistics/tests/test_basic_statistics_spmd.py +107 -0
- sklearnex/spmd/cluster/tests/test_dbscan_spmd.py +97 -0
- sklearnex/spmd/cluster/tests/test_kmeans_spmd.py +172 -0
- sklearnex/spmd/covariance/tests/test_covariance_spmd.py +107 -0
- sklearnex/spmd/decomposition/tests/test_pca_spmd.py +128 -0
- sklearnex/spmd/ensemble/tests/test_forest_spmd.py +265 -0
- sklearnex/spmd/linear_model/tests/test_linear_regression_spmd.py +145 -0
- sklearnex/spmd/linear_model/tests/test_logistic_regression_spmd.py +163 -0
- sklearnex/spmd/neighbors/tests/test_neighbors_spmd.py +288 -0
- sklearnex/svm/_common.py +19 -21
- sklearnex/svm/tests/test_svm.py +12 -20
- sklearnex/tests/_utils.py +143 -20
- sklearnex/tests/_utils_spmd.py +185 -0
- sklearnex/tests/test_config.py +4 -0
- sklearnex/tests/test_monkeypatch.py +12 -4
- sklearnex/tests/test_patching.py +16 -13
- sklearnex/tests/test_run_to_run_stability.py +21 -9
- {scikit_learn_intelex-2024.6.0.dist-info → scikit_learn_intelex-2024.7.0.dist-info}/LICENSE.txt +0 -0
- {scikit_learn_intelex-2024.6.0.dist-info → scikit_learn_intelex-2024.7.0.dist-info}/WHEEL +0 -0
- {scikit_learn_intelex-2024.6.0.dist-info → scikit_learn_intelex-2024.7.0.dist-info}/top_level.txt +0 -0
sklearnex/preview/linear_model/tests/test_ridge.py
@@ -0,0 +1,102 @@
+# ===============================================================================
+# Copyright 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============================================================================
+
+import numpy
+import pytest
+from numpy.testing import assert_allclose
+from sklearn.exceptions import NotFittedError
+
+from daal4py.sklearn._utils import daal_check_version
+from onedal.tests.utils._dataframes_support import (
+    _convert_to_dataframe,
+    get_dataframes_and_queues,
+)
+
+
+@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+def test_sklearnex_import_ridge(dataframe, queue):
+    from sklearnex.preview.linear_model import Ridge
+
+    X = numpy.array([[1, 1], [1, 2], [2, 2], [2, 3]])
+    y = numpy.dot(X, numpy.array([1, 2])) + 3
+    X_c = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+    y_c = _convert_to_dataframe(y, sycl_queue=queue, target_df=dataframe)
+    ridge_reg = Ridge(alpha=0.5).fit(X_c, y_c)
+
+    if daal_check_version((2024, "P", 600)):
+        assert "preview" in ridge_reg.__module__
+    else:
+        assert "daal4py" in ridge_reg.__module__
+
+    assert_allclose(ridge_reg.intercept_, 3.86, rtol=1e-2)
+    assert_allclose(ridge_reg.coef_, [0.91, 1.64], rtol=1e-2)
+
+
+@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+@pytest.mark.parametrize("sample_size", [100, 1000])
+@pytest.mark.parametrize("feature_size", [10, 50])
+@pytest.mark.parametrize("alpha", [0.1, 0.5, 1.0])
+def test_ridge_coefficients(dataframe, queue, sample_size, feature_size, alpha):
+    from sklearnex.preview.linear_model import Ridge
+
+    X = numpy.random.rand(sample_size, feature_size)
+    y = numpy.random.rand(sample_size)
+    X_c = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+    y_c = _convert_to_dataframe(y, sycl_queue=queue, target_df=dataframe)
+    ridge_reg = Ridge(fit_intercept=False, alpha=alpha).fit(X_c, y_c)
+
+    # computing the coefficients manually
+    # using the normal equation formula: (X^T * X + lambda * I)^-1 * X^T * y
+    lambda_identity = alpha * numpy.eye(X.shape[1])
+    inverse_term = numpy.linalg.inv(numpy.dot(X.T, X) + lambda_identity)
+    xt_y = numpy.dot(X.T, y)
+    coefficients_manual = numpy.dot(inverse_term, xt_y)
+
+    assert_allclose(ridge_reg.coef_, coefficients_manual, rtol=1e-6, atol=1e-6)
+
+
+if daal_check_version((2024, "P", 600)):
+
+    @pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+    def test_ridge_score_before_fit(dataframe, queue):
+        from sklearnex.preview.linear_model import Ridge
+
+        sample_count, feature_count = 10, 5
+
+        model = Ridge(fit_intercept=True, alpha=0.5)
+
+        X, y = numpy.random.rand(sample_count, feature_count), numpy.random.rand(
+            sample_count
+        )
+        X_c = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+        y_c = _convert_to_dataframe(y, sycl_queue=queue, target_df=dataframe)
+
+        with pytest.raises(NotFittedError):
+            model.score(X_c, y_c)
+
+    @pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+    def test_ridge_predict_before_fit(dataframe, queue):
+        from sklearnex.preview.linear_model import Ridge
+
+        sample_count, feature_count = 10, 5
+
+        model = Ridge(fit_intercept=True, alpha=0.5)
+
+        X = numpy.random.rand(sample_count, feature_count)
+        X_c = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+
+        with pytest.raises(NotFittedError):
+            model.predict(X_c)
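For context, these tests exercise the new preview Ridge estimator added in sklearnex/preview/linear_model/ridge.py. Below is a minimal usage sketch, not part of the diff: it assumes scikit-learn-intelex 2024.7.0 is installed and only uses the import path and constructor arguments that appear above; the closed-form check mirrors the normal-equation comparison in test_ridge_coefficients.

# Minimal sketch (not part of the diff): fit the preview Ridge on NumPy data and
# cross-check the coefficients against the normal equation, as the test above does.
import numpy as np
from sklearnex.preview.linear_model import Ridge

rng = np.random.default_rng(0)
X = rng.random((100, 10))
y = rng.random(100)

alpha = 0.5
model = Ridge(fit_intercept=False, alpha=alpha).fit(X, y)

# Closed-form ridge solution: (X^T X + alpha * I)^-1 X^T y
manual = np.linalg.solve(X.T @ X + alpha * np.eye(X.shape[1]), X.T @ y)
np.testing.assert_allclose(model.coef_, manual, rtol=1e-6, atol=1e-6)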
sklearnex/spmd/basic_statistics/tests/test_basic_statistics_spmd.py
@@ -0,0 +1,107 @@
+# ==============================================================================
+# Copyright 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+
+from onedal.basic_statistics.tests.test_basic_statistics import options_and_tests
+from onedal.tests.utils._dataframes_support import (
+    _convert_to_dataframe,
+    get_dataframes_and_queues,
+)
+from sklearnex.tests._utils_spmd import (
+    _generate_statistic_data,
+    _get_local_tensor,
+    _mpi_libs_and_gpu_available,
+)
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.mpi
+def test_basic_stats_spmd_gold(dataframe, queue):
+    # Import spmd and batch algo
+    from onedal.basic_statistics import BasicStatistics as BasicStatistics_Batch
+    from sklearnex.spmd.basic_statistics import BasicStatistics as BasicStatistics_SPMD
+
+    # Create gold data and convert to dataframe
+    data = np.array(
+        [
+            [0.0, 0.0, 0.0],
+            [0.0, 1.0, 2.0],
+            [0.0, 2.0, 4.0],
+            [0.0, 3.0, 8.0],
+            [0.0, 4.0, 16.0],
+            [0.0, 5.0, 32.0],
+            [0.0, 6.0, 64.0],
+        ]
+    )
+
+    local_dpt_data = _convert_to_dataframe(
+        _get_local_tensor(data), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Ensure results of batch algo match spmd
+    spmd_result = BasicStatistics_SPMD().fit(local_dpt_data)
+    batch_result = BasicStatistics_Batch().fit(data)
+
+    for option in (opt[0] for opt in options_and_tests):
+        assert_allclose(getattr(spmd_result, option), getattr(batch_result, option))
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize("n_samples", [100, 10000])
+@pytest.mark.parametrize("n_features", [10, 100])
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.mpi
+def test_basic_stats_spmd_synthetic(n_samples, n_features, dataframe, queue, dtype):
+    # Import spmd and batch algo
+    from onedal.basic_statistics import BasicStatistics as BasicStatistics_Batch
+    from sklearnex.spmd.basic_statistics import BasicStatistics as BasicStatistics_SPMD
+
+    # Generate data and convert to dataframe
+    data = _generate_statistic_data(n_samples, n_features, dtype=dtype)
+
+    local_dpt_data = _convert_to_dataframe(
+        _get_local_tensor(data), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Ensure results of batch algo match spmd
+    spmd_result = BasicStatistics_SPMD().fit(local_dpt_data)
+    batch_result = BasicStatistics_Batch().fit(data)
+
+    tol = 1e-5 if dtype == np.float32 else 1e-7
+    for option in (opt[0] for opt in options_and_tests):
+        assert_allclose(
+            getattr(spmd_result, option),
+            getattr(batch_result, option),
+            atol=tol,
+            rtol=tol,
+        )
sklearnex/spmd/cluster/tests/test_dbscan_spmd.py
@@ -0,0 +1,97 @@
+# ==============================================================================
+# Copyright 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+import numpy as np
+import pytest
+
+from onedal.tests.utils._dataframes_support import (
+    _convert_to_dataframe,
+    get_dataframes_and_queues,
+)
+from sklearnex.tests._utils_spmd import (
+    _generate_clustering_data,
+    _get_local_tensor,
+    _mpi_libs_and_gpu_available,
+    _spmd_assert_allclose,
+)
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.mpi
+def test_dbscan_spmd_gold(dataframe, queue):
+    # Import spmd and batch algo
+    from sklearnex.cluster import DBSCAN as DBSCAN_Batch
+    from sklearnex.spmd.cluster import DBSCAN as DBSCAN_SPMD
+
+    data = np.array([[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]])
+
+    local_dpt_data = _convert_to_dataframe(
+        _get_local_tensor(data), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Ensure labels from fit of batch algo matches spmd
+    spmd_model = DBSCAN_SPMD(eps=3, min_samples=2).fit(local_dpt_data)
+    batch_model = DBSCAN_Batch(eps=3, min_samples=2).fit(data)
+
+    _spmd_assert_allclose(spmd_model.labels_, batch_model.labels_)
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize("n_samples", [200, 10000])
+@pytest.mark.parametrize("n_features_and_eps", [(5, 3), (5, 10), (25, 10)])
+@pytest.mark.parametrize("centers", [10, None])
+@pytest.mark.parametrize("min_samples", [2, 5, 15])
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+@pytest.mark.mpi
+def test_dbscan_spmd_synthetic(
+    n_samples, n_features_and_eps, centers, min_samples, dataframe, queue, dtype
+):
+    n_features, eps = n_features_and_eps
+    # Import spmd and batch algo
+    from sklearnex.cluster import DBSCAN as DBSCAN_Batch
+    from sklearnex.spmd.cluster import DBSCAN as DBSCAN_SPMD
+
+    data, _ = _generate_clustering_data(
+        n_samples, n_features, centers=centers, dtype=dtype
+    )
+
+    local_dpt_data = _convert_to_dataframe(
+        _get_local_tensor(data), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Ensure labels from fit of batch algo matches spmd
+    spmd_model = DBSCAN_SPMD(eps=eps, min_samples=min_samples).fit(local_dpt_data)
+    batch_model = DBSCAN_Batch(eps=eps, min_samples=min_samples).fit(data)
+
+    _spmd_assert_allclose(spmd_model.labels_, batch_model.labels_)
+
+    # Ensure meaningful test setup
+    if np.all(batch_model.labels_ == -1):
+        raise ValueError("No labels given - try raising epsilon")
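The gold data in test_dbscan_spmd_gold is the familiar six-point example from the scikit-learn DBSCAN documentation. As a hedged illustration that is not part of the diff, running only the batch sklearnex DBSCAN on it would be expected to yield two clusters plus one noise point:

# Illustrative sketch (not part of the diff): the gold data above, run through the
# batch sklearnex DBSCAN alone. With eps=3 and min_samples=2 the expected outcome
# is two clusters and one noise point, i.e. labels like [0, 0, 0, 1, 1, -1].
import numpy as np
from sklearnex.cluster import DBSCAN

data = np.array([[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]])
labels = DBSCAN(eps=3, min_samples=2).fit(data).labels_
print(labels)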
sklearnex/spmd/cluster/tests/test_kmeans_spmd.py
@@ -0,0 +1,172 @@
+# ==============================================================================
+# Copyright 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+
+from onedal.tests.utils._dataframes_support import (
+    _convert_to_dataframe,
+    get_dataframes_and_queues,
+)
+from sklearnex.tests._utils_spmd import (
+    _assert_kmeans_labels_allclose,
+    _assert_unordered_allclose,
+    _generate_clustering_data,
+    _get_local_tensor,
+    _mpi_libs_and_gpu_available,
+)
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.mpi
+def test_kmeans_spmd_gold(dataframe, queue):
+    # Import spmd and batch algo
+    from sklearnex.cluster import KMeans as KMeans_Batch
+    from sklearnex.spmd.cluster import KMeans as KMeans_SPMD
+
+    X_train = np.array(
+        [
+            [1, 2],
+            [2, 2],
+            [2, 3],
+            [8, 7],
+            [8, 8],
+            [25, 80],
+            [5, 65],
+            [2, 8],
+            [1, 3],
+            [2, 2],
+            [1, 3],
+            [2, 2],
+        ]
+    )
+    X_test = np.array([[0, 0], [12, 3], [2, 2], [7, 8]])
+
+    local_dpt_X_train = _convert_to_dataframe(
+        _get_local_tensor(X_train), sycl_queue=queue, target_df=dataframe
+    )
+    local_dpt_X_test = _convert_to_dataframe(
+        _get_local_tensor(X_test), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Ensure labels from fit of batch algo matches spmd
+    spmd_model = KMeans_SPMD(n_clusters=2, random_state=0).fit(local_dpt_X_train)
+    batch_model = KMeans_Batch(n_clusters=2, random_state=0).fit(X_train)
+
+    _assert_unordered_allclose(spmd_model.cluster_centers_, batch_model.cluster_centers_)
+    _assert_kmeans_labels_allclose(
+        spmd_model.labels_,
+        batch_model.labels_,
+        spmd_model.cluster_centers_,
+        batch_model.cluster_centers_,
+    )
+    assert_allclose(spmd_model.n_iter_, batch_model.n_iter_, atol=1)
+
+    # Ensure predictions of batch algo match spmd
+    spmd_result = spmd_model.predict(local_dpt_X_test)
+    batch_result = batch_model.predict(X_test)
+
+    _assert_kmeans_labels_allclose(
+        spmd_result,
+        batch_result,
+        spmd_model.cluster_centers_,
+        batch_model.cluster_centers_,
+    )
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize("n_samples", [200, 10000])
+@pytest.mark.parametrize("n_features", [5, 25])
+@pytest.mark.parametrize("n_clusters", [2, 5, 15])
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+@pytest.mark.mpi
+def test_kmeans_spmd_synthetic(
+    n_samples, n_features, n_clusters, dataframe, queue, dtype
+):
+    # Import spmd and batch algo
+    from sklearnex.cluster import KMeans as KMeans_Batch
+    from sklearnex.spmd.cluster import KMeans as KMeans_SPMD
+
+    # TODO: investigate issues when centers != n_clusters (spmd and batch results don't match for all values of K)
+    X_train, X_test = _generate_clustering_data(
+        n_samples, n_features, centers=n_clusters, dtype=dtype
+    )
+
+    local_dpt_X_train = _convert_to_dataframe(
+        _get_local_tensor(X_train), sycl_queue=queue, target_df=dataframe
+    )
+    local_dpt_X_test = _convert_to_dataframe(
+        _get_local_tensor(X_test), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Validate KMeans init
+    spmd_model_init = KMeans_SPMD(n_clusters=n_clusters, max_iter=1, random_state=0).fit(
+        local_dpt_X_train
+    )
+    batch_model_init = KMeans_Batch(
+        n_clusters=n_clusters, max_iter=1, random_state=0
+    ).fit(X_train)
+    # TODO: centers do not match up after init
+    # _assert_unordered_allclose(spmd_model_init.cluster_centers_, batch_model_init.cluster_centers_)
+
+    # Ensure labels from fit of batch algo matches spmd, using same init
+    spmd_model = KMeans_SPMD(
+        n_clusters=n_clusters, init=spmd_model_init.cluster_centers_, random_state=0
+    ).fit(local_dpt_X_train)
+    batch_model = KMeans_Batch(
+        n_clusters=n_clusters, init=spmd_model_init.cluster_centers_, random_state=0
+    ).fit(X_train)
+
+    atol = 1e-5 if dtype == np.float32 else 1e-7
+    _assert_unordered_allclose(
+        spmd_model.cluster_centers_, batch_model.cluster_centers_, atol=atol
+    )
+    _assert_kmeans_labels_allclose(
+        spmd_model.labels_,
+        batch_model.labels_,
+        spmd_model.cluster_centers_,
+        batch_model.cluster_centers_,
+        atol=atol,
+    )
+    # TODO: KMeans iterations are not aligned
+    # assert_allclose(spmd_model.n_iter_, batch_model.n_iter_, atol=1)
+
+    # Ensure predictions of batch algo match spmd
+    spmd_result = spmd_model.predict(local_dpt_X_test)
+    batch_result = batch_model.predict(X_test)
+
+    _assert_kmeans_labels_allclose(
+        spmd_result,
+        batch_result,
+        spmd_model.cluster_centers_,
+        batch_model.cluster_centers_,
+        atol=atol,
+    )
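The helpers _assert_unordered_allclose and _assert_kmeans_labels_allclose come from the new sklearnex/tests/_utils_spmd.py and are not shown in this diff; they have to tolerate the fact that SPMD and batch KMeans may number the same clusters differently. One hypothetical way to write such an order-insensitive comparison of cluster centers is sketched below; it is an illustration, not the package's actual helper.

# Hypothetical sketch: compare two sets of cluster centers while ignoring the order
# in which the clusters are numbered (the real helper may match rows differently).
import numpy as np
from numpy.testing import assert_allclose

def assert_centers_allclose(a: np.ndarray, b: np.ndarray, **tolerances) -> None:
    # Sort rows lexicographically so matching centers line up regardless of
    # cluster label order.
    a_sorted = a[np.lexsort(a.T[::-1])]
    b_sorted = b[np.lexsort(b.T[::-1])]
    assert_allclose(a_sorted, b_sorted, **tolerances)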
sklearnex/spmd/covariance/tests/test_covariance_spmd.py
@@ -0,0 +1,107 @@
+# ==============================================================================
+# Copyright 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+
+from onedal.tests.utils._dataframes_support import (
+    _convert_to_dataframe,
+    get_dataframes_and_queues,
+)
+from sklearnex.tests._utils_spmd import (
+    _generate_statistic_data,
+    _get_local_tensor,
+    _mpi_libs_and_gpu_available,
+)
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.mpi
+def test_covariance_spmd_gold(dataframe, queue):
+    # Import spmd and batch algo
+    from onedal.covariance import EmpiricalCovariance as EmpiricalCovariance_Batch
+    from sklearnex.spmd.covariance import EmpiricalCovariance as EmpiricalCovariance_SPMD
+
+    # Create gold data and convert to dataframe
+    data = np.array(
+        [
+            [0.0, 0.0, 0.0],
+            [0.0, 1.0, 2.0],
+            [0.0, 2.0, 4.0],
+            [0.0, 3.0, 8.0],
+            [0.0, 4.0, 16.0],
+            [0.0, 5.0, 32.0],
+            [0.0, 6.0, 64.0],
+        ]
+    )
+
+    local_dpt_data = _convert_to_dataframe(
+        _get_local_tensor(data), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Ensure results of batch algo match spmd
+    spmd_result = EmpiricalCovariance_SPMD().fit(local_dpt_data)
+    batch_result = EmpiricalCovariance_Batch().fit(data)
+
+    assert_allclose(spmd_result.covariance_, batch_result.covariance_)
+    assert_allclose(spmd_result.location_, batch_result.location_)
+
+
+@pytest.mark.skipif(
+    not _mpi_libs_and_gpu_available,
+    reason="GPU device and MPI libs required for test",
+)
+@pytest.mark.parametrize("n_samples", [100, 10000])
+@pytest.mark.parametrize("n_features", [10, 100])
+@pytest.mark.parametrize("assume_centered", [True, False])
+@pytest.mark.parametrize(
+    "dataframe,queue",
+    get_dataframes_and_queues(dataframe_filter_="dpnp,dpctl", device_filter_="gpu"),
+)
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+@pytest.mark.mpi
+def test_covariance_spmd_synthetic(
+    n_samples, n_features, assume_centered, dataframe, queue, dtype
+):
+    # Import spmd and batch algo
+    # TODO: Align sklearnex spmd to sklearnex estimator with bias and swap onedal with sklearnex
+    from onedal.covariance import EmpiricalCovariance as EmpiricalCovariance_Batch
+    from sklearnex.spmd.covariance import EmpiricalCovariance as EmpiricalCovariance_SPMD
+
+    # Generate data and convert to dataframe
+    data = _generate_statistic_data(n_samples, n_features, dtype=dtype)
+
+    local_dpt_data = _convert_to_dataframe(
+        _get_local_tensor(data), sycl_queue=queue, target_df=dataframe
+    )
+
+    # Ensure results of batch algo match spmd
+    spmd_result = EmpiricalCovariance_SPMD(assume_centered=assume_centered).fit(
+        local_dpt_data
+    )
+    batch_result = EmpiricalCovariance_Batch(assume_centered=assume_centered).fit(data)
+
+    atol = 1e-5 if dtype == np.float32 else 1e-7
+    assert_allclose(spmd_result.covariance_, batch_result.covariance_, atol=atol)
+    assert_allclose(spmd_result.location_, batch_result.location_, atol=atol)