scikit-learn-intelex 2024.5.0__py39-none-manylinux1_x86_64.whl → 2024.6.0__py39-none-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (35)
  1. {scikit_learn_intelex-2024.5.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/METADATA +2 -2
  2. {scikit_learn_intelex-2024.5.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/RECORD +34 -30
  3. sklearnex/cluster/dbscan.py +3 -0
  4. sklearnex/cluster/tests/test_dbscan.py +8 -6
  5. sklearnex/conftest.py +11 -1
  6. sklearnex/decomposition/tests/test_pca.py +4 -2
  7. sklearnex/dispatcher.py +15 -1
  8. sklearnex/ensemble/_forest.py +114 -23
  9. sklearnex/ensemble/tests/test_forest.py +13 -3
  10. sklearnex/glob/dispatcher.py +16 -2
  11. sklearnex/linear_model/incremental_linear.py +102 -25
  12. sklearnex/linear_model/linear.py +25 -7
  13. sklearnex/linear_model/logistic_regression.py +13 -15
  14. sklearnex/linear_model/tests/test_incremental_linear.py +10 -10
  15. sklearnex/linear_model/tests/test_linear.py +2 -2
  16. sklearnex/neighbors/knn_regression.py +24 -0
  17. sklearnex/preview/__init__.py +1 -1
  18. sklearnex/preview/decomposition/__init__.py +19 -0
  19. sklearnex/preview/decomposition/incremental_pca.py +228 -0
  20. sklearnex/preview/decomposition/tests/test_incremental_pca.py +266 -0
  21. sklearnex/svm/_common.py +165 -20
  22. sklearnex/svm/nusvc.py +40 -4
  23. sklearnex/svm/nusvr.py +31 -2
  24. sklearnex/svm/svc.py +40 -4
  25. sklearnex/svm/svr.py +31 -2
  26. sklearnex/tests/_utils.py +49 -17
  27. sklearnex/tests/test_common.py +54 -0
  28. sklearnex/tests/test_memory_usage.py +185 -126
  29. sklearnex/tests/test_patching.py +5 -12
  30. sklearnex/tests/test_run_to_run_stability.py +283 -0
  31. sklearnex/utils/_namespace.py +1 -1
  32. sklearnex/tests/test_run_to_run_stability_tests.py +0 -428
  33. {scikit_learn_intelex-2024.5.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/LICENSE.txt +0 -0
  34. {scikit_learn_intelex-2024.5.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/WHEEL +0 -0
  35. {scikit_learn_intelex-2024.5.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/top_level.txt +0 -0
sklearnex/linear_model/incremental_linear.py
@@ -20,6 +20,7 @@ import warnings
 import numpy as np
 from sklearn.base import BaseEstimator, MultiOutputMixin, RegressorMixin
 from sklearn.exceptions import NotFittedError
+from sklearn.metrics import r2_score
 from sklearn.utils import check_array, gen_batches
 
 from daal4py.sklearn._n_jobs_support import control_n_jobs
@@ -134,6 +135,7 @@ class IncrementalLinearRegression(MultiOutputMixin, RegressorMixin, BaseEstimato
                 X,
                 dtype=[np.float64, np.float32],
                 copy=self.copy_X,
+                reset=False,
             )
         else:
             X = check_array(
@@ -147,33 +149,42 @@ class IncrementalLinearRegression(MultiOutputMixin, RegressorMixin, BaseEstimato
             self._onedal_finalize_fit()
         return self._onedal_estimator.predict(X, queue)
 
-    def _onedal_partial_fit(self, X, y, queue=None):
+    def _onedal_score(self, X, y, sample_weight=None, queue=None):
+        return r2_score(
+            y, self._onedal_predict(X, queue=queue), sample_weight=sample_weight
+        )
+
+    def _onedal_partial_fit(self, X, y, check_input=True, queue=None):
         first_pass = not hasattr(self, "n_samples_seen_") or self.n_samples_seen_ == 0
 
         if sklearn_check_version("1.2"):
             self._validate_params()
 
-        if sklearn_check_version("1.0"):
-            X, y = self._validate_data(
-                X,
-                y,
-                dtype=[np.float64, np.float32],
-                reset=first_pass,
-                copy=self.copy_X,
-                multi_output=True,
-            )
-        else:
-            X = check_array(
-                X,
-                dtype=[np.float64, np.float32],
-                copy=self.copy_X,
-            )
-            y = check_array(
-                y,
-                dtype=[np.float64, np.float32],
-                copy=False,
-                ensure_2d=False,
-            )
+        if check_input:
+            if sklearn_check_version("1.0"):
+                X, y = self._validate_data(
+                    X,
+                    y,
+                    dtype=[np.float64, np.float32],
+                    reset=first_pass,
+                    copy=self.copy_X,
+                    multi_output=True,
+                    force_all_finite=False,
+                )
+            else:
+                X = check_array(
+                    X,
+                    dtype=[np.float64, np.float32],
+                    copy=self.copy_X,
+                    force_all_finite=False,
+                )
+                y = check_array(
+                    y,
+                    dtype=[np.float64, np.float32],
+                    copy=False,
+                    ensure_2d=False,
+                    force_all_finite=False,
+                )
 
         if first_pass:
             self.n_samples_seen_ = X.shape[0]
@@ -202,7 +213,12 @@ class IncrementalLinearRegression(MultiOutputMixin, RegressorMixin, BaseEstimato
 
         if sklearn_check_version("1.0"):
             X, y = self._validate_data(
-                X, y, dtype=[np.float64, np.float32], copy=self.copy_X, multi_output=True
+                X,
+                y,
+                dtype=[np.float64, np.float32],
+                copy=self.copy_X,
+                multi_output=True,
+                ensure_2d=True,
             )
         else:
             X = check_array(
@@ -234,7 +250,7 @@ class IncrementalLinearRegression(MultiOutputMixin, RegressorMixin, BaseEstimato
 
         for batch in gen_batches(n_samples, self.batch_size_):
             X_batch, y_batch = X[batch], y[batch]
-            self._onedal_partial_fit(X_batch, y_batch, queue=queue)
+            self._onedal_partial_fit(X_batch, y_batch, check_input=False, queue=queue)
 
         if sklearn_check_version("1.2"):
             self._validate_params()
@@ -288,7 +304,7 @@ class IncrementalLinearRegression(MultiOutputMixin, RegressorMixin, BaseEstimato
     coef_ = property(get_coef_, set_coef_)
     intercept_ = property(get_intercept_, set_intercept_)
 
-    def partial_fit(self, X, y):
+    def partial_fit(self, X, y, check_input=True):
         """
         Incremental fit linear model with X and y. All of X and y is
         processed as a single batch.
@@ -318,6 +334,7 @@ class IncrementalLinearRegression(MultiOutputMixin, RegressorMixin, BaseEstimato
             },
            X,
            y,
+            check_input=check_input,
         )
         return self
 
@@ -385,3 +402,63 @@ class IncrementalLinearRegression(MultiOutputMixin, RegressorMixin, BaseEstimato
             },
             X,
         )
+
+    @wrap_output_data
+    def score(self, X, y, sample_weight=None):
+        """Return the coefficient of determination of the prediction.
+
+        The coefficient of determination :math:`R^2` is defined as
+        :math:`(1 - \\frac{u}{v})`, where :math:`u` is the residual
+        sum of squares ``((y_true - y_pred)** 2).sum()`` and :math:`v`
+        is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.
+        The best possible score is 1.0 and it can be negative (because the
+        model can be arbitrarily worse). A constant model that always predicts
+        the expected value of `y`, disregarding the input features, would get
+        a :math:`R^2` score of 0.0.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            Test samples. For some estimators this may be a precomputed
+            kernel matrix or a list of generic objects instead with shape
+            ``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``
+            is the number of samples used in the fitting for the estimator.
+
+        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
+            True values for `X`.
+
+        sample_weight : array-like of shape (n_samples,), default=None
+            Sample weights.
+
+        Returns
+        -------
+        score : float
+            :math:`R^2` of ``self.predict(X)`` w.r.t. `y`.
+
+        Notes
+        -----
+        The :math:`R^2` score used when calling ``score`` on a regressor uses
+        ``multioutput='uniform_average'`` from version 0.23 to keep consistent
+        with default value of :func:`~sklearn.metrics.r2_score`.
+        This influences the ``score`` method of all the multioutput
+        regressors (except for
+        :class:`~sklearn.multioutput.MultiOutputRegressor`).
+        """
+        if not hasattr(self, "coef_"):
+            msg = (
+                "This %(name)s instance is not fitted yet. Call 'fit' or 'partial_fit' "
+                "with appropriate arguments before using this estimator."
+            )
+            raise NotFittedError(msg % {"name": self.__class__.__name__})
+
+        return dispatch(
+            self,
+            "score",
+            {
+                "onedal": self.__class__._onedal_score,
+                "sklearn": None,
+            },
+            X,
+            y,
+            sample_weight=sample_weight,
+        )
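Usage sketch (illustrative, not taken from the diff): the hunks above give partial_fit a check_input flag and add a dispatched score method backed by r2_score. Assuming IncrementalLinearRegression is exported from sklearnex.linear_model as in the 2024.5 wheel, the new surface would be exercised roughly like this:

import numpy as np
from sklearnex.linear_model import IncrementalLinearRegression  # assumed export path

rng = np.random.default_rng(0)
X = rng.random((1000, 3))
y = X @ np.array([1.0, 2.0, 3.0]) + 3.0

est = IncrementalLinearRegression()
for idx in np.array_split(np.arange(len(X)), 5):
    # check_input=True (the default) now validates with force_all_finite=False;
    # fit() passes check_input=False for the batches it generates itself.
    est.partial_fit(X[idx], y[idx], check_input=True)

# score() is new in 2024.6.0 and dispatches to _onedal_score, i.e. r2_score over predict().
print(est.score(X, y))
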
sklearnex/linear_model/linear.py
@@ -20,6 +20,7 @@ from abc import ABC
 import numpy as np
 from sklearn.exceptions import NotFittedError
 from sklearn.linear_model import LinearRegression as sklearn_LinearRegression
+from sklearn.metrics import r2_score
 
 from daal4py.sklearn._n_jobs_support import control_n_jobs
 from daal4py.sklearn._utils import sklearn_check_version
@@ -123,6 +124,20 @@ class LinearRegression(sklearn_LinearRegression):
             X,
         )
 
+    @wrap_output_data
+    def score(self, X, y, sample_weight=None):
+        return dispatch(
+            self,
+            "score",
+            {
+                "onedal": self.__class__._onedal_score,
+                "sklearn": sklearn_LinearRegression.score,
+            },
+            X,
+            y,
+            sample_weight=sample_weight,
+        )
+
     def _test_type_and_finiteness(self, X_in):
         X = X_in if isinstance(X_in, np.ndarray) else np.asarray(X_in)
 
@@ -193,22 +208,19 @@ class LinearRegression(sklearn_LinearRegression):
         return patching_status
 
     def _onedal_predict_supported(self, method_name, *data):
-        assert method_name == "predict"
-        assert len(data) == 1
-
         class_name = self.__class__.__name__
         patching_status = PatchingConditionsChain(
             f"sklearn.linear_model.{class_name}.predict"
         )
 
-        n_samples = _num_samples(*data)
+        n_samples = _num_samples(data[0])
         model_is_sparse = issparse(self.coef_) or (
             self.fit_intercept and issparse(self.intercept_)
         )
         dal_ready = patching_status.and_conditions(
             [
                 (n_samples > 0, "Number of samples is less than 1."),
-                (not issparse(*data), "Sparse input is not supported."),
+                (not issparse(data[0]), "Sparse input is not supported."),
                 (not model_is_sparse, "Sparse coefficients are not supported."),
             ]
         )
@@ -216,7 +228,7 @@ class LinearRegression(sklearn_LinearRegression):
             return patching_status
 
         patching_status.and_condition(
-            self._test_type_and_finiteness(*data), "Input X is not supported."
+            self._test_type_and_finiteness(data[0]), "Input X is not supported."
        )
 
         return patching_status
@@ -224,7 +236,7 @@ class LinearRegression(sklearn_LinearRegression):
     def _onedal_supported(self, method_name, *data):
         if method_name == "fit":
             return self._onedal_fit_supported(method_name, *data)
-        if method_name == "predict":
+        if method_name in ["predict", "score"]:
             return self._onedal_predict_supported(method_name, *data)
         raise RuntimeError(f"Unknown method {method_name} in {self.__class__.__name__}")
 
@@ -286,6 +298,11 @@ class LinearRegression(sklearn_LinearRegression):
         res = self._onedal_estimator.predict(X, queue=queue)
         return res
 
+    def _onedal_score(self, X, y, sample_weight=None, queue=None):
+        return r2_score(
+            y, self._onedal_predict(X, queue=queue), sample_weight=sample_weight
+        )
+
     def get_coef_(self):
         return self.coef_
 
@@ -314,3 +331,4 @@ class LinearRegression(sklearn_LinearRegression):
 
     fit.__doc__ = sklearn_LinearRegression.fit.__doc__
     predict.__doc__ = sklearn_LinearRegression.predict.__doc__
+    score.__doc__ = sklearn_LinearRegression.score.__doc__
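Illustrative check (not taken from the diff): LinearRegression.score is now dispatched, and its oneDAL branch computes r2_score(y, predict(X)), so it should agree with an R^2 computed by hand; a minimal sketch:

import numpy as np
from sklearn.metrics import r2_score
from sklearnex.linear_model import LinearRegression

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + 4.0

model = LinearRegression().fit(X, y)
# Both branches (oneDAL _onedal_score and stock sklearn score) reduce to r2_score here.
assert np.isclose(model.score(X, y), r2_score(y, model.predict(X)))
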
sklearnex/linear_model/logistic_regression.py
@@ -21,18 +21,6 @@ from daal4py.sklearn._utils import daal_check_version
 from daal4py.sklearn.linear_model.logistic_path import (
     LogisticRegression as LogisticRegression_daal4py,
 )
-from daal4py.sklearn.linear_model.logistic_path import daal4py_fit, daal4py_predict
-
-
-class BaseLogisticRegression(ABC):
-    def _save_attributes(self):
-        assert hasattr(self, "_onedal_estimator")
-        self.classes_ = self._onedal_estimator.classes_
-        self.coef_ = self._onedal_estimator.coef_
-        self.intercept_ = self._onedal_estimator.intercept_
-        self.n_features_in_ = self._onedal_estimator.n_features_in_
-        self.n_iter_ = self._onedal_estimator.n_iter_
-
 
 if daal_check_version((2024, "P", 1)):
     import numpy as np
@@ -44,6 +32,7 @@ if daal_check_version((2024, "P", 1)):
 
     from daal4py.sklearn._n_jobs_support import control_n_jobs
     from daal4py.sklearn._utils import sklearn_check_version
+    from daal4py.sklearn.linear_model.logistic_path import daal4py_fit, daal4py_predict
     from onedal.linear_model import LogisticRegression as onedal_LogisticRegression
     from onedal.utils import _num_samples
 
@@ -51,6 +40,15 @@ if daal_check_version((2024, "P", 1)):
     from .._utils import PatchingConditionsChain, get_patch_message
     from ..utils.validation import _assert_all_finite
 
+    class BaseLogisticRegression(ABC):
+        def _save_attributes(self):
+            assert hasattr(self, "_onedal_estimator")
+            self.classes_ = self._onedal_estimator.classes_
+            self.coef_ = self._onedal_estimator.coef_
+            self.intercept_ = self._onedal_estimator.intercept_
+            self.n_features_in_ = self._onedal_estimator.n_features_in_
+            self.n_iter_ = self._onedal_estimator.n_iter_
+
     @control_n_jobs(
         decorated_methods=[
             "fit",
@@ -82,7 +80,7 @@ if daal_check_version((2024, "P", 1)):
             random_state=None,
             solver="lbfgs",
             max_iter=100,
-            multi_class="auto",
+            multi_class="deprecated" if sklearn_check_version("1.5") else "auto",
             verbose=0,
             warm_start=False,
             n_jobs=None,
@@ -146,7 +144,7 @@ if daal_check_version((2024, "P", 1)):
                 self._check_feature_names(X, reset=False)
             return dispatch(
                 self,
-                "predict",
+                "predict_proba",
                 {
                     "onedal": self.__class__._onedal_predict_proba,
                     "sklearn": sklearn_LogisticRegression.predict_proba,
@@ -160,7 +158,7 @@ if daal_check_version((2024, "P", 1)):
                 self._check_feature_names(X, reset=False)
             return dispatch(
                 self,
-                "predict",
+                "predict_log_proba",
                 {
                     "onedal": self.__class__._onedal_predict_log_proba,
                     "sklearn": sklearn_LogisticRegression.predict_log_proba,
sklearnex/linear_model/tests/test_incremental_linear.py
@@ -47,7 +47,7 @@ def test_sklearnex_fit_on_gold_data(dataframe, queue, fit_intercept, macro_block
 
     y_pred = inclin.predict(X_df)
 
-    tol = 2e-6 if dtype == np.float32 else 1e-7
+    tol = 2e-6 if y_pred.dtype == np.float32 else 1e-7
     assert_allclose(inclin.coef_, [1], atol=tol)
     if fit_intercept:
         assert_allclose(inclin.intercept_, [0], atol=tol)
@@ -82,15 +82,15 @@ def test_sklearnex_partial_fit_on_gold_data(
         )
         inclin.partial_fit(X_split_df, y_split_df)
 
+    X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+    y_pred = inclin.predict(X_df)
+
     assert inclin.n_features_in_ == 1
-    tol = 2e-6 if dtype == np.float32 else 1e-7
+    tol = 2e-6 if y_pred.dtype == np.float32 else 1e-7
     assert_allclose(inclin.coef_, [[1]], atol=tol)
     if fit_intercept:
         assert_allclose(inclin.intercept_, 3, atol=tol)
 
-    X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
-    y_pred = inclin.predict(X_df)
-
     assert_allclose(_as_numpy(y_pred), y, atol=tol)
 
 
@@ -122,15 +122,15 @@ def test_sklearnex_partial_fit_multitarget_on_gold_data(
         )
         inclin.partial_fit(X_split_df, y_split_df)
 
+    X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+    y_pred = inclin.predict(X_df)
+
     assert inclin.n_features_in_ == 2
-    tol = 7e-6 if dtype == np.float32 else 1e-7
+    tol = 7e-6 if y_pred.dtype == np.float32 else 1e-7
     assert_allclose(inclin.coef_, [1.0, 2.0], atol=tol)
     if fit_intercept:
         assert_allclose(inclin.intercept_, 3.0, atol=tol)
 
-    X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
-    y_pred = inclin.predict(X_df)
-
     assert_allclose(_as_numpy(y_pred), y, atol=tol)
 
 
@@ -181,7 +181,7 @@ def test_sklearnex_partial_fit_on_random_data(
         )
         inclin.partial_fit(X_split_df, y_split_df)
 
-    tol = 1e-4 if dtype == np.float32 else 1e-7
+    tol = 1e-4 if inclin.coef_.dtype == np.float32 else 1e-7
     assert_allclose(coef, inclin.coef_.T, atol=tol)
 
     if fit_intercept:
sklearnex/linear_model/tests/test_linear.py
@@ -52,7 +52,7 @@ def test_sklearnex_import_linear(dataframe, queue, dtype, macro_block):
     assert "sklearnex" in linreg.__module__
     assert linreg.n_features_in_ == 2
 
-    tol = 1e-5 if dtype == np.float32 else 1e-7
+    tol = 1e-5 if _as_numpy(linreg.coef_).dtype == np.float32 else 1e-7
     assert_allclose(_as_numpy(linreg.intercept_), 3.0, rtol=tol)
     assert_allclose(_as_numpy(linreg.coef_), [1.0, 2.0], rtol=tol)
 
@@ -113,5 +113,5 @@ def test_sklearnex_reconstruct_model(dataframe, queue, dtype):
 
     y_pred = linreg.predict(X)
 
-    tol = 1e-5 if dtype == np.float32 else 1e-7
+    tol = 1e-5 if _as_numpy(y_pred).dtype == np.float32 else 1e-7
     assert_allclose(gtr, _as_numpy(y_pred), rtol=tol)
sklearnex/neighbors/knn_regression.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 # ==============================================================================
 
+from sklearn.metrics import r2_score
 from sklearn.neighbors._regression import (
     KNeighborsRegressor as sklearn_KNeighborsRegressor,
 )
@@ -117,6 +118,23 @@ class KNeighborsRegressor(sklearn_KNeighborsRegressor, KNeighborsDispatchingBase
             X,
         )
 
+    @wrap_output_data
+    def score(self, X, y, sample_weight=None):
+        check_is_fitted(self)
+        if sklearn_check_version("1.0"):
+            self._check_feature_names(X, reset=False)
+        return dispatch(
+            self,
+            "score",
+            {
+                "onedal": self.__class__._onedal_score,
+                "sklearn": sklearn_KNeighborsRegressor.score,
+            },
+            X,
+            y,
+            sample_weight=sample_weight,
+        )
+
     @wrap_output_data
     def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
         check_is_fitted(self)
@@ -184,6 +202,11 @@ class KNeighborsRegressor(sklearn_KNeighborsRegressor, KNeighborsDispatchingBase
             X, n_neighbors, return_distance, queue=queue
         )
 
+    def _onedal_score(self, X, y, sample_weight=None, queue=None):
+        return r2_score(
+            y, self._onedal_predict(X, queue=queue), sample_weight=sample_weight
+        )
+
     def _save_attributes(self):
         self.n_features_in_ = self._onedal_estimator.n_features_in_
         self.n_samples_fit_ = self._onedal_estimator.n_samples_fit_
@@ -196,3 +219,4 @@ class KNeighborsRegressor(sklearn_KNeighborsRegressor, KNeighborsDispatchingBase
     predict.__doc__ = sklearn_KNeighborsRegressor.predict.__doc__
     kneighbors.__doc__ = sklearn_KNeighborsRegressor.kneighbors.__doc__
     radius_neighbors.__doc__ = sklearn_NearestNeighbors.radius_neighbors.__doc__
+    score.__doc__ = sklearn_KNeighborsRegressor.score.__doc__
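Usage sketch (illustrative, not taken from the diff): KNeighborsRegressor gains a dispatched score method whose oneDAL branch is r2_score over the oneDAL prediction; assuming the usual sklearnex.neighbors export:

import numpy as np
from sklearnex.neighbors import KNeighborsRegressor

rng = np.random.default_rng(0)
X = rng.standard_normal((300, 4))
y = 2.0 * X[:, 0] + X[:, 1]

knn = KNeighborsRegressor(n_neighbors=5).fit(X, y)
print(knn.score(X, y))  # R^2 of knn.predict(X) w.r.t. y, dispatched per the patching conditions
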
sklearnex/preview/__init__.py
@@ -14,4 +14,4 @@
 # limitations under the License.
 # ==============================================================================
 
-__all__ = ["cluster", "covariance"]
+__all__ = ["cluster", "covariance", "decomposition"]
sklearnex/preview/decomposition/__init__.py
@@ -0,0 +1,19 @@
+# ===============================================================================
+# Copyright 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===============================================================================
+
+from .incremental_pca import IncrementalPCA
+
+__all__ = ["IncrementalPCA"]
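Usage sketch (illustrative, not taken from the diff): the new preview package exposes IncrementalPCA via sklearnex.preview.decomposition, as confirmed by the __init__.py above. The estimator body (incremental_pca.py, +228 lines) is not shown in this section, so the sketch below assumes it mirrors scikit-learn's IncrementalPCA API (n_components, partial_fit, transform):

import numpy as np
from sklearnex.preview.decomposition import IncrementalPCA  # export confirmed by the new __init__.py

rng = np.random.default_rng(0)
X = rng.standard_normal((1000, 10))

ipca = IncrementalPCA(n_components=3)   # assumed sklearn-compatible constructor
for chunk in np.array_split(X, 10):
    ipca.partial_fit(chunk)             # assumed sklearn-compatible partial_fit

print(ipca.transform(X).shape)          # expected (1000, 3)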