scikit-learn-intelex 2024.5.0__py310-none-manylinux1_x86_64.whl → 2024.6.0__py310-none-manylinux1_x86_64.whl

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Files changed (35)
  1. {scikit_learn_intelex-2024.5.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/METADATA +2 -2
  2. {scikit_learn_intelex-2024.5.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/RECORD +34 -30
  3. sklearnex/cluster/dbscan.py +3 -0
  4. sklearnex/cluster/tests/test_dbscan.py +8 -6
  5. sklearnex/conftest.py +11 -1
  6. sklearnex/decomposition/tests/test_pca.py +4 -2
  7. sklearnex/dispatcher.py +15 -1
  8. sklearnex/ensemble/_forest.py +114 -23
  9. sklearnex/ensemble/tests/test_forest.py +13 -3
  10. sklearnex/glob/dispatcher.py +16 -2
  11. sklearnex/linear_model/incremental_linear.py +102 -25
  12. sklearnex/linear_model/linear.py +25 -7
  13. sklearnex/linear_model/logistic_regression.py +13 -15
  14. sklearnex/linear_model/tests/test_incremental_linear.py +10 -10
  15. sklearnex/linear_model/tests/test_linear.py +2 -2
  16. sklearnex/neighbors/knn_regression.py +24 -0
  17. sklearnex/preview/__init__.py +1 -1
  18. sklearnex/preview/decomposition/__init__.py +19 -0
  19. sklearnex/preview/decomposition/incremental_pca.py +228 -0
  20. sklearnex/preview/decomposition/tests/test_incremental_pca.py +266 -0
  21. sklearnex/svm/_common.py +165 -20
  22. sklearnex/svm/nusvc.py +40 -4
  23. sklearnex/svm/nusvr.py +31 -2
  24. sklearnex/svm/svc.py +40 -4
  25. sklearnex/svm/svr.py +31 -2
  26. sklearnex/tests/_utils.py +49 -17
  27. sklearnex/tests/test_common.py +54 -0
  28. sklearnex/tests/test_memory_usage.py +185 -126
  29. sklearnex/tests/test_patching.py +5 -12
  30. sklearnex/tests/test_run_to_run_stability.py +283 -0
  31. sklearnex/utils/_namespace.py +1 -1
  32. sklearnex/tests/test_run_to_run_stability_tests.py +0 -428
  33. {scikit_learn_intelex-2024.5.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/LICENSE.txt +0 -0
  34. {scikit_learn_intelex-2024.5.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/WHEEL +0 -0
  35. {scikit_learn_intelex-2024.5.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/top_level.txt +0 -0
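
The largest addition in this release is a preview IncrementalPCA estimator backed by oneDAL (items 18-20 above; the diffs for the new module and its tests follow). As orientation before the diff itself, here is a minimal usage sketch based on the new test file; the printed values come from the gold-data test below and may differ slightly across backends:

# Minimal sketch (not part of the diff): exercising the new preview
# estimator the same way test_sklearnex_import does below.
import numpy as np
from sklearnex.preview.decomposition import IncrementalPCA

X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]], dtype=np.float64)

incpca = IncrementalPCA(n_components=2).fit(X)
print(incpca.singular_values_)  # ~[6.30061232, 0.54980396] per the gold-data test
X_transformed = incpca.transform(X)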
sklearnex/preview/decomposition/incremental_pca.py
@@ -0,0 +1,228 @@
+ # ===============================================================================
+ # Copyright 2024 Intel Corporation
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ===============================================================================
+
+ import numpy as np
+ from sklearn.decomposition import IncrementalPCA as sklearn_IncrementalPCA
+ from sklearn.utils import check_array, gen_batches
+
+ from daal4py.sklearn._n_jobs_support import control_n_jobs
+ from daal4py.sklearn._utils import sklearn_check_version
+ from onedal.decomposition import IncrementalPCA as onedal_IncrementalPCA
+
+ from ..._device_offload import dispatch, wrap_output_data
+ from ..._utils import PatchingConditionsChain
+
+
+ @control_n_jobs(
+     decorated_methods=["fit", "partial_fit", "transform", "_onedal_finalize_fit"]
+ )
+ class IncrementalPCA(sklearn_IncrementalPCA):
+
+     def __init__(self, n_components=None, *, whiten=False, copy=True, batch_size=None):
+         super().__init__(
+             n_components=n_components, whiten=whiten, copy=copy, batch_size=batch_size
+         )
+         self._need_to_finalize = False
+         self._need_to_finalize_attrs = {
+             "mean_",
+             "explained_variance_",
+             "explained_variance_ratio_",
+             "n_components_",
+             "components_",
+             "noise_variance_",
+             "singular_values_",
+             "var_",
+         }
+
+     _onedal_incremental_pca = staticmethod(onedal_IncrementalPCA)
+
+     def _onedal_transform(self, X, queue=None):
+         assert hasattr(self, "_onedal_estimator")
+         if self._need_to_finalize:
+             self._onedal_finalize_fit()
+         X = check_array(X, dtype=[np.float64, np.float32])
+         return self._onedal_estimator.predict(X, queue)
+
+     def _onedal_fit_transform(self, X, queue=None):
+         self._onedal_fit(X, queue)
+         return self._onedal_transform(X, queue)
+
+     def _onedal_partial_fit(self, X, check_input=True, queue=None):
+         first_pass = not hasattr(self, "components_")
+
+         if check_input:
+             if sklearn_check_version("1.0"):
+                 X = self._validate_data(
+                     X, dtype=[np.float64, np.float32], reset=first_pass
+                 )
+             else:
+                 X = check_array(
+                     X,
+                     dtype=[np.float64, np.float32],
+                     copy=self.copy,
+                 )
+
+         n_samples, n_features = X.shape
+
+         if self.n_components is None:
+             if not hasattr(self, "components_"):
+                 self.n_components_ = min(n_samples, n_features)
+             else:
+                 self.n_components_ = self.components_.shape[0]
+         elif not self.n_components <= n_features:
+             raise ValueError(
+                 "n_components=%r invalid for n_features=%d, need "
+                 "more rows than columns for IncrementalPCA "
+                 "processing" % (self.n_components, n_features)
+             )
+         elif not self.n_components <= n_samples:
+             raise ValueError(
+                 "n_components=%r must be less or equal to "
+                 "the batch number of samples "
+                 "%d." % (self.n_components, n_samples)
+             )
+         else:
+             self.n_components_ = self.n_components
+
+         if not hasattr(self, "n_samples_seen_"):
+             self.n_samples_seen_ = n_samples
+         else:
+             self.n_samples_seen_ += n_samples
+
+         onedal_params = {"n_components": self.n_components_, "whiten": self.whiten}
+
+         if not hasattr(self, "_onedal_estimator"):
+             self._onedal_estimator = self._onedal_incremental_pca(**onedal_params)
+         self._onedal_estimator.partial_fit(X, queue)
+         self._need_to_finalize = True
+
+     def _onedal_finalize_fit(self):
+         assert hasattr(self, "_onedal_estimator")
+         self._onedal_estimator.finalize_fit()
+         self._need_to_finalize = False
+
+     def _onedal_fit(self, X, queue=None):
+         if sklearn_check_version("1.2"):
+             self._validate_params()
+
+         if sklearn_check_version("1.0"):
+             X = self._validate_data(X, dtype=[np.float64, np.float32], copy=self.copy)
+         else:
+             X = check_array(
+                 X,
+                 dtype=[np.float64, np.float32],
+                 copy=self.copy,
+             )
+
+         n_samples, n_features = X.shape
+
+         if self.batch_size is None:
+             self.batch_size_ = 5 * n_features
+         else:
+             self.batch_size_ = self.batch_size
+
+         self.n_samples_seen_ = 0
+         if hasattr(self, "_onedal_estimator"):
+             self._onedal_estimator._reset()
+
+         for batch in gen_batches(n_samples, self.batch_size_):
+             X_batch = X[batch]
+             self._onedal_partial_fit(X_batch, queue=queue)
+
+         self._onedal_finalize_fit()
+
+         return self
+
+     def _onedal_supported(self, method_name, *data):
+         patching_status = PatchingConditionsChain(
+             f"sklearn.decomposition.{self.__class__.__name__}.{method_name}"
+         )
+         return patching_status
+
+     _onedal_cpu_supported = _onedal_supported
+     _onedal_gpu_supported = _onedal_supported
+
+     def __getattr__(self, attr):
+         if attr in self._need_to_finalize_attrs:
+             if hasattr(self, "_onedal_estimator"):
+                 if self._need_to_finalize:
+                     self._onedal_finalize_fit()
+                 return getattr(self._onedal_estimator, attr)
+             else:
+                 raise AttributeError(
+                     f"'{self.__class__.__name__}' object has no attribute '{attr}'"
+                 )
+         if attr in self.__dict__:
+             return self.__dict__[attr]
+
+         raise AttributeError(
+             f"'{self.__class__.__name__}' object has no attribute '{attr}'"
+         )
+
+     def partial_fit(self, X, y=None, check_input=True):
+         dispatch(
+             self,
+             "partial_fit",
+             {
+                 "onedal": self.__class__._onedal_partial_fit,
+                 "sklearn": sklearn_IncrementalPCA.partial_fit,
+             },
+             X,
+             check_input=check_input,
+         )
+         return self
+
+     def fit(self, X, y=None):
+         dispatch(
+             self,
+             "fit",
+             {
+                 "onedal": self.__class__._onedal_fit,
+                 "sklearn": sklearn_IncrementalPCA.fit,
+             },
+             X,
+         )
+         return self
+
+     @wrap_output_data
+     def transform(self, X):
+         return dispatch(
+             self,
+             "transform",
+             {
+                 "onedal": self.__class__._onedal_transform,
+                 "sklearn": sklearn_IncrementalPCA.transform,
+             },
+             X,
+         )
+
+     @wrap_output_data
+     def fit_transform(self, X, y=None, **fit_params):
+         return dispatch(
+             self,
+             "fit_transform",
+             {
+                 "onedal": self.__class__._onedal_fit_transform,
+                 "sklearn": sklearn_IncrementalPCA.fit_transform,
+             },
+             X,
+         )
+
+     __doc__ = sklearn_IncrementalPCA.__doc__
+     fit.__doc__ = sklearn_IncrementalPCA.fit.__doc__
+     fit_transform.__doc__ = sklearn_IncrementalPCA.fit_transform.__doc__
+     transform.__doc__ = sklearn_IncrementalPCA.transform.__doc__
+     partial_fit.__doc__ = sklearn_IncrementalPCA.partial_fit.__doc__
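
A note on the pattern above: partial_fit only accumulates partial results on the oneDAL side and sets _need_to_finalize; the actual finalization runs lazily, either inside transform or when one of the attributes in _need_to_finalize_attrs is first read through __getattr__. A hedged sketch of what that means in practice (values again from the gold-data test; exact output may vary by backend):

# Sketch (not part of the diff): lazy finalization after partial_fit.
import numpy as np
from sklearnex.preview.decomposition import IncrementalPCA

X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]], dtype=np.float64)

incpca = IncrementalPCA(n_components=2)
for batch in np.array_split(X, 3):
    incpca.partial_fit(batch)  # buffers partial results; no finalize yet

# Reading a fitted attribute routes through __getattr__, which calls
# _onedal_finalize_fit() once before delegating to the oneDAL estimator.
print(incpca.singular_values_)  # ~[6.30061232, 0.54980396]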
sklearnex/preview/decomposition/tests/test_incremental_pca.py
@@ -0,0 +1,266 @@
+ # ===============================================================================
+ # Copyright 2024 Intel Corporation
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ===============================================================================
+
+ import numpy as np
+ import pytest
+ from numpy.testing import assert_allclose
+
+ from daal4py.sklearn._utils import daal_check_version
+ from onedal.tests.utils._dataframes_support import (
+     _as_numpy,
+     _convert_to_dataframe,
+     get_dataframes_and_queues,
+ )
+ from sklearnex.preview.decomposition import IncrementalPCA
+
+
+ @pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+ def test_sklearnex_import(dataframe, queue):
+     X = [[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]
+     X = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+     incpca = IncrementalPCA(n_components=2)
+     result = incpca.fit(X)
+     assert "sklearnex" in incpca.__module__
+     assert hasattr(incpca, "_onedal_estimator")
+     assert_allclose(_as_numpy(result.singular_values_), [6.30061232, 0.54980396])
+
+
+ def check_pca_on_gold_data(incpca, dtype, whiten, transformed_data):
+     expected_n_samples_seen_ = 6
+     expected_n_features_in_ = 2
+     expected_n_components_ = 2
+     expected_components_ = np.array([[0.83849224, 0.54491354], [-0.54491354, 0.83849224]])
+     expected_singular_values_ = np.array([6.30061232, 0.54980396])
+     expected_mean_ = np.array([0, 0])
+     expected_var_ = np.array([5.6, 2.4])
+     expected_explained_variance_ = np.array([7.93954312, 0.06045688])
+     expected_explained_variance_ratio_ = np.array([0.99244289, 0.00755711])
+     expected_noise_variance_ = 0.0
+     expected_transformed_data = (
+         np.array(
+             [
+                 [-0.49096647, -1.19399271],
+                 [-0.78854479, 1.02218579],
+                 [-1.27951125, -0.17180692],
+                 [0.49096647, 1.19399271],
+                 [0.78854479, -1.02218579],
+                 [1.27951125, 0.17180692],
+             ]
+         )
+         if whiten
+         else np.array(
+             [
+                 [-1.38340578, -0.2935787],
+                 [-2.22189802, 0.25133484],
+                 [-3.6053038, -0.04224385],
+                 [1.38340578, 0.2935787],
+                 [2.22189802, -0.25133484],
+                 [3.6053038, 0.04224385],
+             ]
+         )
+     )
+
+     tol = 1e-7
+     if transformed_data.dtype == np.float32:
+         tol = 7e-6 if whiten else 1e-6
+
+     assert incpca.n_samples_seen_ == expected_n_samples_seen_
+     assert incpca.n_features_in_ == expected_n_features_in_
+     assert incpca.n_components_ == expected_n_components_
+
+     assert_allclose(incpca.singular_values_, expected_singular_values_, atol=tol)
+     assert_allclose(incpca.mean_, expected_mean_, atol=tol)
+     assert_allclose(incpca.var_, expected_var_, atol=tol)
+     assert_allclose(incpca.explained_variance_, expected_explained_variance_, atol=tol)
+     assert_allclose(
+         incpca.explained_variance_ratio_, expected_explained_variance_ratio_, atol=tol
+     )
+     assert np.abs(incpca.noise_variance_ - expected_noise_variance_) < tol
+     if daal_check_version((2024, "P", 500)):
+         assert_allclose(incpca.components_, expected_components_, atol=tol)
+         assert_allclose(_as_numpy(transformed_data), expected_transformed_data, atol=tol)
+     else:
+         for i in range(incpca.n_components_):
+             abs_dot_product = np.abs(
+                 np.dot(incpca.components_[i], expected_components_[i])
+             )
+             assert np.abs(abs_dot_product - 1.0) < tol
+
+             if np.dot(incpca.components_[i], expected_components_[i]) < 0:
+                 assert_allclose(
+                     _as_numpy(-transformed_data[i]),
+                     expected_transformed_data[i],
+                     atol=tol,
+                 )
+             else:
+                 assert_allclose(
+                     _as_numpy(transformed_data[i]), expected_transformed_data[i], atol=tol
+                 )
+
+
+ def check_pca(incpca, dtype, whiten, data, transformed_data):
+     tol = 3e-3 if transformed_data.dtype == np.float32 else 2e-6
+
+     n_components = incpca.n_components_
+
+     expected_n_samples_seen = data.shape[0]
+     expected_n_features_in = data.shape[1]
+     n_samples_seen = incpca.n_samples_seen_
+     n_features_in = incpca.n_features_in_
+     assert n_samples_seen == expected_n_samples_seen
+     assert n_features_in == expected_n_features_in
+
+     components = incpca.components_
+     singular_values = incpca.singular_values_
+     centered_data = data - np.mean(data, axis=0)
+     cov_eigenvalues, cov_eigenvectors = np.linalg.eig(
+         centered_data.T @ centered_data / (n_samples_seen - 1)
+     )
+     cov_eigenvalues = np.nan_to_num(cov_eigenvalues)
+     cov_eigenvalues[cov_eigenvalues < 0] = 0
+     eigenvalues_order = np.argsort(cov_eigenvalues)[::-1]
+     sorted_eigenvalues = cov_eigenvalues[eigenvalues_order]
+     sorted_eigenvectors = cov_eigenvectors[:, eigenvalues_order]
+     expected_singular_values = np.sqrt(sorted_eigenvalues * (n_samples_seen - 1))[
+         :n_components
+     ]
+     expected_components = sorted_eigenvectors.T[:n_components]
+
+     assert_allclose(singular_values, expected_singular_values, atol=tol)
+     for i in range(n_components):
+         component_length = np.dot(components[i], components[i])
+         assert np.abs(component_length - 1.0) < tol
+         abs_dot_product = np.abs(np.dot(components[i], expected_components[i]))
+         assert np.abs(abs_dot_product - 1.0) < tol
+
+     expected_mean = np.mean(data, axis=0)
+     assert_allclose(incpca.mean_, expected_mean, atol=tol)
+
+     expected_var = np.var(_as_numpy(data), ddof=1, axis=0)
+     assert_allclose(incpca.var_, expected_var, atol=tol)
+
+     expected_explained_variance = sorted_eigenvalues[:n_components]
+     assert_allclose(incpca.explained_variance_, expected_explained_variance, atol=tol)
+
+     expected_explained_variance_ratio = expected_explained_variance / np.sum(
+         sorted_eigenvalues
+     )
+     assert_allclose(
+         incpca.explained_variance_ratio_, expected_explained_variance_ratio, atol=tol
+     )
+
+     expected_noise_variance = (
+         np.mean(sorted_eigenvalues[n_components:])
+         if len(sorted_eigenvalues) > n_components
+         else 0.0
+     )
+     # TODO Fix noise variance computation (It is necessary to update C++ side)
+     # assert np.abs(incpca.noise_variance_ - expected_noise_variance) < tol
+
+     expected_transformed_data = centered_data @ components.T
+     if whiten:
+         scale = np.sqrt(incpca.explained_variance_)
+         min_scale = np.finfo(scale.dtype).eps
+         scale[scale < min_scale] = np.inf
+         expected_transformed_data /= scale
+
+     if not (whiten and n_components == n_samples_seen):
+         assert_allclose(_as_numpy(transformed_data), expected_transformed_data, atol=tol)
+
+
+ @pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+ @pytest.mark.parametrize("whiten", [True, False])
+ @pytest.mark.parametrize("num_blocks", [1, 2, 3])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_sklearnex_partial_fit_on_gold_data(dataframe, queue, whiten, num_blocks, dtype):
+
+     X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
+     X = X.astype(dtype=dtype)
+     X_split = np.array_split(X, num_blocks)
+     incpca = IncrementalPCA(whiten=whiten)
+
+     for i in range(num_blocks):
+         X_split_df = _convert_to_dataframe(
+             X_split[i], sycl_queue=queue, target_df=dataframe
+         )
+         incpca.partial_fit(X_split_df)
+
+     X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+     transformed_data = incpca.transform(X_df)
+     check_pca_on_gold_data(incpca, dtype, whiten, transformed_data)
+
+
+ @pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+ @pytest.mark.parametrize("whiten", [True, False])
+ @pytest.mark.parametrize("num_blocks", [1, 2, 3])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_sklearnex_fit_on_gold_data(dataframe, queue, whiten, num_blocks, dtype):
+
+     X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
+     X = X.astype(dtype=dtype)
+     incpca = IncrementalPCA(whiten=whiten, batch_size=X.shape[0] // num_blocks)
+
+     X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+     incpca.fit(X_df)
+     transformed_data = incpca.transform(X_df)
+
+     check_pca_on_gold_data(incpca, dtype, whiten, transformed_data)
+
+
+ @pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+ @pytest.mark.parametrize("whiten", [True, False])
+ @pytest.mark.parametrize("num_blocks", [1, 2, 3])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_sklearnex_fit_transform_on_gold_data(
+     dataframe, queue, whiten, num_blocks, dtype
+ ):
+
+     X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
+     X = X.astype(dtype=dtype)
+     incpca = IncrementalPCA(whiten=whiten, batch_size=X.shape[0] // num_blocks)
+
+     X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+     transformed_data = incpca.fit_transform(X_df)
+
+     check_pca_on_gold_data(incpca, dtype, whiten, transformed_data)
+
+
+ @pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+ @pytest.mark.parametrize("n_components", [None, 1, 5])
+ @pytest.mark.parametrize("whiten", [True, False])
+ @pytest.mark.parametrize("num_blocks", [1, 10])
+ @pytest.mark.parametrize("row_count", [100, 1000])
+ @pytest.mark.parametrize("column_count", [10, 100])
+ @pytest.mark.parametrize("dtype", [np.float32, np.float64])
+ def test_sklearnex_partial_fit_on_random_data(
+     dataframe, queue, n_components, whiten, num_blocks, row_count, column_count, dtype
+ ):
+     seed = 81
+     gen = np.random.default_rng(seed)
+     X = gen.uniform(low=-0.3, high=+0.7, size=(row_count, column_count))
+     X = X.astype(dtype=dtype)
+     X_split = np.array_split(X, num_blocks)
+     incpca = IncrementalPCA(n_components=n_components, whiten=whiten)
+
+     for i in range(num_blocks):
+         X_split_df = _convert_to_dataframe(
+             X_split[i], sycl_queue=queue, target_df=dataframe
+         )
+         incpca.partial_fit(X_split_df)
+
+     X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+     transformed_data = incpca.transform(X_df)
+     check_pca(incpca, dtype, whiten, X, transformed_data)
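
The random-data test above checks the estimator against a direct eigendecomposition of the sample covariance matrix, recovering singular values as sqrt(eigenvalue * (n_samples - 1)). For completeness, a hedged sketch of the out-of-core workflow these tests model; the chunk source and sizes here are illustrative assumptions, not part of the diff:

# Sketch: out-of-core fitting with partial_fit, mirroring
# test_sklearnex_partial_fit_on_random_data; chunk source is hypothetical.
import numpy as np
from sklearnex.preview.decomposition import IncrementalPCA

rng = np.random.default_rng(81)
incpca = IncrementalPCA(n_components=5, whiten=True)

for _ in range(10):  # e.g. chunks streamed from disk
    chunk = rng.uniform(low=-0.3, high=0.7, size=(100, 10))
    incpca.partial_fit(chunk)

new_data = rng.uniform(low=-0.3, high=0.7, size=(20, 10))
reduced = incpca.transform(new_data)  # finalizes the fit, then projects
print(reduced.shape)  # (20, 5)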