scikit-learn-intelex 2024.4.0__py312-none-manylinux1_x86_64.whl → 2024.6.0__py312-none-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. {scikit_learn_intelex-2024.4.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/METADATA +2 -2
  2. {scikit_learn_intelex-2024.4.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/RECORD +43 -36
  3. sklearnex/_device_offload.py +8 -1
  4. sklearnex/basic_statistics/tests/test_incremental_basic_statistics.py +2 -4
  5. sklearnex/cluster/dbscan.py +3 -0
  6. sklearnex/cluster/tests/test_dbscan.py +8 -6
  7. sklearnex/conftest.py +11 -1
  8. sklearnex/covariance/incremental_covariance.py +217 -30
  9. sklearnex/covariance/tests/test_incremental_covariance.py +54 -17
  10. sklearnex/decomposition/pca.py +68 -13
  11. sklearnex/decomposition/tests/test_pca.py +6 -4
  12. sklearnex/dispatcher.py +46 -1
  13. sklearnex/ensemble/_forest.py +114 -22
  14. sklearnex/ensemble/tests/test_forest.py +13 -3
  15. sklearnex/glob/dispatcher.py +16 -2
  16. sklearnex/linear_model/__init__.py +5 -3
  17. sklearnex/linear_model/incremental_linear.py +464 -0
  18. sklearnex/linear_model/linear.py +27 -9
  19. sklearnex/linear_model/logistic_regression.py +13 -15
  20. sklearnex/linear_model/tests/test_incremental_linear.py +200 -0
  21. sklearnex/linear_model/tests/test_linear.py +2 -2
  22. sklearnex/neighbors/knn_regression.py +24 -0
  23. sklearnex/neighbors/tests/test_neighbors.py +2 -2
  24. sklearnex/preview/__init__.py +1 -1
  25. sklearnex/preview/decomposition/__init__.py +19 -0
  26. sklearnex/preview/decomposition/incremental_pca.py +228 -0
  27. sklearnex/preview/decomposition/tests/test_incremental_pca.py +266 -0
  28. sklearnex/svm/_common.py +165 -20
  29. sklearnex/svm/nusvc.py +40 -4
  30. sklearnex/svm/nusvr.py +31 -2
  31. sklearnex/svm/svc.py +40 -4
  32. sklearnex/svm/svr.py +31 -2
  33. sklearnex/tests/_utils.py +70 -29
  34. sklearnex/tests/test_common.py +54 -0
  35. sklearnex/tests/test_memory_usage.py +195 -132
  36. sklearnex/tests/test_n_jobs_support.py +4 -0
  37. sklearnex/tests/test_patching.py +22 -10
  38. sklearnex/tests/test_run_to_run_stability.py +283 -0
  39. sklearnex/utils/_namespace.py +1 -1
  40. sklearnex/utils/tests/test_finite.py +89 -0
  41. sklearnex/tests/test_run_to_run_stability_tests.py +0 -428
  42. {scikit_learn_intelex-2024.4.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/LICENSE.txt +0 -0
  43. {scikit_learn_intelex-2024.4.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/WHEEL +0 -0
  44. {scikit_learn_intelex-2024.4.0.dist-info → scikit_learn_intelex-2024.6.0.dist-info}/top_level.txt +0 -0
sklearnex/linear_model/incremental_linear.py (new file)
@@ -0,0 +1,464 @@
+ # ===============================================================================
+ # Copyright 2024 Intel Corporation
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ===============================================================================
+
+ import numbers
+ import warnings
+
+ import numpy as np
+ from sklearn.base import BaseEstimator, MultiOutputMixin, RegressorMixin
+ from sklearn.exceptions import NotFittedError
+ from sklearn.metrics import r2_score
+ from sklearn.utils import check_array, gen_batches
+
+ from daal4py.sklearn._n_jobs_support import control_n_jobs
+ from daal4py.sklearn._utils import sklearn_check_version
+ from onedal.linear_model import (
+     IncrementalLinearRegression as onedal_IncrementalLinearRegression,
+ )
+
+ if sklearn_check_version("1.2"):
+     from sklearn.utils._param_validation import Interval
+
+ from onedal.common.hyperparameters import get_hyperparameters
+
+ from .._device_offload import dispatch, wrap_output_data
+ from .._utils import PatchingConditionsChain, register_hyperparameters
+
+
+ @register_hyperparameters(
+     {
+         "fit": get_hyperparameters("linear_regression", "train"),
+         "partial_fit": get_hyperparameters("linear_regression", "train"),
+     }
+ )
+ @control_n_jobs(
+     decorated_methods=["fit", "partial_fit", "predict", "_onedal_finalize_fit"]
+ )
+ class IncrementalLinearRegression(MultiOutputMixin, RegressorMixin, BaseEstimator):
+     """
+     Incremental estimator for linear regression.
+     Allows to train linear regression if data are splitted into batches.
+
+     Parameters
+     ----------
+     fit_intercept : bool, default=True
+         Whether to calculate the intercept for this model. If set
+         to False, no intercept will be used in calculations
+         (i.e. data is expected to be centered).
+
+     copy_X : bool, default=True
+         If True, X will be copied; else, it may be overwritten.
+
+     n_jobs : int, default=None
+         The number of jobs to use for the computation.
+
+     batch_size : int, default=None
+         The number of samples to use for each batch. Only used when calling
+         ``fit``. If ``batch_size`` is ``None``, then ``batch_size``
+         is inferred from the data and set to ``5 * n_features``, to provide a
+         balance between approximation accuracy and memory consumption.
+
+     Attributes
+     ----------
+     coef_ : array of shape (n_features, ) or (n_targets, n_features)
+         Estimated coefficients for the linear regression problem.
+         If multiple targets are passed during the fit (y 2D), this
+         is a 2D array of shape (n_targets, n_features), while if only
+         one target is passed, this is a 1D array of length n_features.
+
+     intercept_ : float or array of shape (n_targets,)
+         Independent term in the linear model. Set to 0.0 if
+         `fit_intercept = False`.
+
+     n_features_in_ : int
+         Number of features seen during :term:`fit`.
+
+     n_samples_seen_ : int
+         The number of samples processed by the estimator. Will be reset on
+         new calls to fit, but increments across ``partial_fit`` calls.
+         It should be not less than `n_features_in_` if `fit_intercept`
+         is False and not less than `n_features_in_` + 1 if `fit_intercept`
+         is True to obtain regression coefficients.
+
+     batch_size_ : int
+         Inferred batch size from ``batch_size``.
+
+     n_features_in_ : int
+         Number of features seen during :term:`fit` `partial_fit`.
+
+     """
+
+     _onedal_incremental_linear = staticmethod(onedal_IncrementalLinearRegression)
+
+     if sklearn_check_version("1.2"):
+         _parameter_constraints: dict = {
+             "fit_intercept": ["boolean"],
+             "copy_X": ["boolean"],
+             "n_jobs": [Interval(numbers.Integral, -1, None, closed="left"), None],
+             "batch_size": [Interval(numbers.Integral, 1, None, closed="left"), None],
+         }
+
+     def __init__(self, *, fit_intercept=True, copy_X=True, n_jobs=None, batch_size=None):
+         self.fit_intercept = fit_intercept
+         self.copy_X = copy_X
+         self.n_jobs = n_jobs
+         self.batch_size = batch_size
+
+     def _onedal_supported(self, method_name, *data):
+         patching_status = PatchingConditionsChain(
+             f"sklearn.linear_model.{self.__class__.__name__}.{method_name}"
+         )
+         return patching_status
+
+     _onedal_cpu_supported = _onedal_supported
+     _onedal_gpu_supported = _onedal_supported
+
+     def _onedal_predict(self, X, queue=None):
+         if sklearn_check_version("1.2"):
+             self._validate_params()
+
+         if sklearn_check_version("1.0"):
+             X = self._validate_data(
+                 X,
+                 dtype=[np.float64, np.float32],
+                 copy=self.copy_X,
+                 reset=False,
+             )
+         else:
+             X = check_array(
+                 X,
+                 dtype=[np.float64, np.float32],
+                 copy=self.copy_X,
+             )
+
+         assert hasattr(self, "_onedal_estimator")
+         if self._need_to_finalize:
+             self._onedal_finalize_fit()
+         return self._onedal_estimator.predict(X, queue)
+
+     def _onedal_score(self, X, y, sample_weight=None, queue=None):
+         return r2_score(
+             y, self._onedal_predict(X, queue=queue), sample_weight=sample_weight
+         )
+
+     def _onedal_partial_fit(self, X, y, check_input=True, queue=None):
+         first_pass = not hasattr(self, "n_samples_seen_") or self.n_samples_seen_ == 0
+
+         if sklearn_check_version("1.2"):
+             self._validate_params()
+
+         if check_input:
+             if sklearn_check_version("1.0"):
+                 X, y = self._validate_data(
+                     X,
+                     y,
+                     dtype=[np.float64, np.float32],
+                     reset=first_pass,
+                     copy=self.copy_X,
+                     multi_output=True,
+                     force_all_finite=False,
+                 )
+             else:
+                 X = check_array(
+                     X,
+                     dtype=[np.float64, np.float32],
+                     copy=self.copy_X,
+                     force_all_finite=False,
+                 )
+                 y = check_array(
+                     y,
+                     dtype=[np.float64, np.float32],
+                     copy=False,
+                     ensure_2d=False,
+                     force_all_finite=False,
+                 )
+
+         if first_pass:
+             self.n_samples_seen_ = X.shape[0]
+             self.n_features_in_ = X.shape[1]
+         else:
+             self.n_samples_seen_ += X.shape[0]
+         onedal_params = {"fit_intercept": self.fit_intercept, "copy_X": self.copy_X}
+         if not hasattr(self, "_onedal_estimator"):
+             self._onedal_estimator = self._onedal_incremental_linear(**onedal_params)
+         self._onedal_estimator.partial_fit(X, y, queue)
+         self._need_to_finalize = True
+
+     def _onedal_finalize_fit(self):
+         assert hasattr(self, "_onedal_estimator")
+         is_underdetermined = self.n_samples_seen_ < self.n_features_in_ + int(
+             self.fit_intercept
+         )
+         if is_underdetermined:
+             raise ValueError("Not enough samples to finalize")
+         self._onedal_estimator.finalize_fit()
+         self._need_to_finalize = False
+
+     def _onedal_fit(self, X, y, queue=None):
+         if sklearn_check_version("1.2"):
+             self._validate_params()
+
+         if sklearn_check_version("1.0"):
+             X, y = self._validate_data(
+                 X,
+                 y,
+                 dtype=[np.float64, np.float32],
+                 copy=self.copy_X,
+                 multi_output=True,
+                 ensure_2d=True,
+             )
+         else:
+             X = check_array(
+                 X,
+                 dtype=[np.float64, np.float32],
+                 copy=self.copy_X,
+             )
+             y = check_array(
+                 y,
+                 dtype=[np.float64, np.float32],
+                 copy=False,
+                 ensure_2d=False,
+             )
+
+         n_samples, n_features = X.shape
+
+         is_underdetermined = n_samples < n_features + int(self.fit_intercept)
+         if is_underdetermined:
+             raise ValueError("Not enough samples to run oneDAL backend")
+
+         if self.batch_size is None:
+             self.batch_size_ = 5 * n_features
+         else:
+             self.batch_size_ = self.batch_size
+
+         self.n_samples_seen_ = 0
+         if hasattr(self, "_onedal_estimator"):
+             self._onedal_estimator._reset()
+
+         for batch in gen_batches(n_samples, self.batch_size_):
+             X_batch, y_batch = X[batch], y[batch]
+             self._onedal_partial_fit(X_batch, y_batch, check_input=False, queue=queue)
+
+         if sklearn_check_version("1.2"):
+             self._validate_params()
+
+         # finite check occurs on onedal side
+         self.n_features_in_ = n_features
+
+         if n_samples == 1:
+             warnings.warn(
+                 "Only one sample available. You may want to reshape your data array"
+             )
+
+         self._onedal_finalize_fit()
+
+         return self
+
+     def get_intercept_(self):
+         if hasattr(self, "_onedal_estimator"):
+             if self._need_to_finalize:
+                 self._onedal_finalize_fit()
+
+             return self._onedal_estimator.intercept_
+         else:
+             raise AttributeError(
+                 f"'{self.__class__.__name__}' object has no attribute 'intercept_'"
+             )
+
+     def set_intercept_(self, value):
+         self.__dict__["intercept_"] = value
+         if hasattr(self, "_onedal_estimator"):
+             self._onedal_estimator.intercept_ = value
+             del self._onedal_estimator._onedal_model
+
+     def get_coef_(self):
+         if hasattr(self, "_onedal_estimator"):
+             if self._need_to_finalize:
+                 self._onedal_finalize_fit()
+
+             return self._onedal_estimator.coef_
+         else:
+             raise AttributeError(
+                 f"'{self.__class__.__name__}' object has no attribute 'coef_'"
+             )
+
+     def set_coef_(self, value):
+         self.__dict__["coef_"] = value
+         if hasattr(self, "_onedal_estimator"):
+             self._onedal_estimator.coef_ = value
+             del self._onedal_estimator._onedal_model
+
+     coef_ = property(get_coef_, set_coef_)
+     intercept_ = property(get_intercept_, set_intercept_)
+
+     def partial_fit(self, X, y, check_input=True):
+         """
+         Incremental fit linear model with X and y. All of X and y is
+         processed as a single batch.
+
+         Parameters
+         ----------
+         X : array-like of shape (n_samples, n_features)
+             Training data, where `n_samples` is the number of samples and
+             `n_features` is the number of features.
+
+         y : array-like of shape (n_samples,) or (n_samples, n_targets)
+             Target values, where `n_samples` is the number of samples and
+             `n_targets` is the number of targets.
+
+         Returns
+         -------
+         self : object
+             Returns the instance itself.
+         """
+
+         dispatch(
+             self,
+             "partial_fit",
+             {
+                 "onedal": self.__class__._onedal_partial_fit,
+                 "sklearn": None,
+             },
+             X,
+             y,
+             check_input=check_input,
+         )
+         return self
+
+     def fit(self, X, y):
+         """
+         Fit the model with X and y, using minibatches of size batch_size.
+
+         Parameters
+         ----------
+         X : array-like of shape (n_samples, n_features)
+             Training data, where `n_samples` is the number of samples and
+             `n_features` is the number of features. It is necessary for
+             `n_samples` to be not less than `n_features` if `fit_intercept`
+             is False and not less than `n_features` + 1 if `fit_intercept`
+             is True
+
+         y : array-like of shape (n_samples,) or (n_samples, n_targets)
+             Target values, where `n_samples` is the number of samples and
+             `n_targets` is the number of targets.
+
+         Returns
+         -------
+         self : object
+             Returns the instance itself.
+         """
+
+         dispatch(
+             self,
+             "fit",
+             {
+                 "onedal": self.__class__._onedal_fit,
+                 "sklearn": None,
+             },
+             X,
+             y,
+         )
+         return self
+
+     @wrap_output_data
+     def predict(self, X, y=None):
+         """
+         Predict using the linear model.
+         Parameters
+         ----------
+         X : array-like or sparse matrix, shape (n_samples, n_features)
+             Samples.
+         Returns
+         -------
+         C : array, shape (n_samples, n_targets)
+             Returns predicted values.
+         """
+         if not hasattr(self, "coef_"):
+             msg = (
+                 "This %(name)s instance is not fitted yet. Call 'fit' or 'partial_fit' "
+                 "with appropriate arguments before using this estimator."
+             )
+             raise NotFittedError(msg % {"name": self.__class__.__name__})
+
+         return dispatch(
+             self,
+             "predict",
+             {
+                 "onedal": self.__class__._onedal_predict,
+                 "sklearn": None,
+             },
+             X,
+         )
+
+     @wrap_output_data
+     def score(self, X, y, sample_weight=None):
+         """Return the coefficient of determination of the prediction.
+
+         The coefficient of determination :math:`R^2` is defined as
+         :math:`(1 - \\frac{u}{v})`, where :math:`u` is the residual
+         sum of squares ``((y_true - y_pred)** 2).sum()`` and :math:`v`
+         is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.
+         The best possible score is 1.0 and it can be negative (because the
+         model can be arbitrarily worse). A constant model that always predicts
+         the expected value of `y`, disregarding the input features, would get
+         a :math:`R^2` score of 0.0.
+
+         Parameters
+         ----------
+         X : array-like of shape (n_samples, n_features)
+             Test samples. For some estimators this may be a precomputed
+             kernel matrix or a list of generic objects instead with shape
+             ``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``
+             is the number of samples used in the fitting for the estimator.
+
+         y : array-like of shape (n_samples,) or (n_samples, n_outputs)
+             True values for `X`.
+
+         sample_weight : array-like of shape (n_samples,), default=None
+             Sample weights.
+
+         Returns
+         -------
+         score : float
+             :math:`R^2` of ``self.predict(X)`` w.r.t. `y`.
+
+         Notes
+         -----
+         The :math:`R^2` score used when calling ``score`` on a regressor uses
+         ``multioutput='uniform_average'`` from version 0.23 to keep consistent
+         with default value of :func:`~sklearn.metrics.r2_score`.
+         This influences the ``score`` method of all the multioutput
+         regressors (except for
+         :class:`~sklearn.multioutput.MultiOutputRegressor`).
+         """
+         if not hasattr(self, "coef_"):
+             msg = (
+                 "This %(name)s instance is not fitted yet. Call 'fit' or 'partial_fit' "
+                 "with appropriate arguments before using this estimator."
+             )
+             raise NotFittedError(msg % {"name": self.__class__.__name__})
+
+         return dispatch(
+             self,
+             "score",
+             {
+                 "onedal": self.__class__._onedal_score,
+                 "sklearn": None,
+             },
+             X,
+             y,
+             sample_weight=sample_weight,
+         )
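The new estimator above follows the standard scikit-learn incremental API (fit, partial_fit, predict, score). A minimal usage sketch, not part of the diff: it assumes the 2024.6 release re-exports IncrementalLinearRegression from sklearnex.linear_model (consistent with the sklearnex/linear_model/__init__.py change listed above), and the data and batch sizes are synthetic, purely for illustration.

import numpy as np
from sklearnex.linear_model import IncrementalLinearRegression  # assumed export path

rng = np.random.default_rng(0)
X = rng.standard_normal((1000, 5))
y = X @ np.array([1.0, -2.0, 0.5, 3.0, 0.0]) + 4.0

# Stream the data in chunks; each partial_fit call updates the oneDAL partial
# result, and finalization is deferred until coef_/intercept_ (or predict/score)
# are first accessed.
est = IncrementalLinearRegression()
for start in range(0, X.shape[0], 200):
    est.partial_fit(X[start : start + 200], y[start : start + 200])

print(est.coef_, est.intercept_)
print(est.score(X, y))  # R^2, computed through the dispatched _onedal_score

# Equivalent one-shot call: fit() itself slices X into batches of batch_size_
# (5 * n_features when batch_size is None) via gen_batches.
est_batched = IncrementalLinearRegression(batch_size=256).fit(X, y)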
sklearnex/linear_model/linear.py
@@ -20,6 +20,7 @@ from abc import ABC
  import numpy as np
  from sklearn.exceptions import NotFittedError
  from sklearn.linear_model import LinearRegression as sklearn_LinearRegression
+ from sklearn.metrics import r2_score

  from daal4py.sklearn._n_jobs_support import control_n_jobs
  from daal4py.sklearn._utils import sklearn_check_version
@@ -123,6 +124,20 @@ class LinearRegression(sklearn_LinearRegression):
              X,
          )

+     @wrap_output_data
+     def score(self, X, y, sample_weight=None):
+         return dispatch(
+             self,
+             "score",
+             {
+                 "onedal": self.__class__._onedal_score,
+                 "sklearn": sklearn_LinearRegression.score,
+             },
+             X,
+             y,
+             sample_weight=sample_weight,
+         )
+
      def _test_type_and_finiteness(self, X_in):
          X = X_in if isinstance(X_in, np.ndarray) else np.asarray(X_in)

@@ -157,7 +172,7 @@ class LinearRegression(sklearn_LinearRegression):
          n_features = _num_features(X, fallback_1d=True)

          # Check if equations are well defined
-         is_good_for_onedal = n_samples >= (n_features + int(self.fit_intercept))
+         is_underdetermined = n_samples < (n_features + int(self.fit_intercept))

          dal_ready = patching_status.and_conditions(
              [
@@ -172,7 +187,7 @@ class LinearRegression(sklearn_LinearRegression):
                      "Forced positive coefficients are not supported.",
                  ),
                  (
-                     is_good_for_onedal,
+                     not is_underdetermined,
                      "The shape of X (fitting) does not satisfy oneDAL requirements:"
                      "Number of features + 1 >= number of samples.",
                  ),
@@ -193,22 +208,19 @@ class LinearRegression(sklearn_LinearRegression):
          return patching_status

      def _onedal_predict_supported(self, method_name, *data):
-         assert method_name == "predict"
-         assert len(data) == 1
-
          class_name = self.__class__.__name__
          patching_status = PatchingConditionsChain(
              f"sklearn.linear_model.{class_name}.predict"
          )

-         n_samples = _num_samples(*data)
+         n_samples = _num_samples(data[0])
          model_is_sparse = issparse(self.coef_) or (
              self.fit_intercept and issparse(self.intercept_)
          )
          dal_ready = patching_status.and_conditions(
              [
                  (n_samples > 0, "Number of samples is less than 1."),
-                 (not issparse(*data), "Sparse input is not supported."),
+                 (not issparse(data[0]), "Sparse input is not supported."),
                  (not model_is_sparse, "Sparse coefficients are not supported."),
              ]
          )
@@ -216,7 +228,7 @@ class LinearRegression(sklearn_LinearRegression):
              return patching_status

          patching_status.and_condition(
-             self._test_type_and_finiteness(*data), "Input X is not supported."
+             self._test_type_and_finiteness(data[0]), "Input X is not supported."
          )

          return patching_status
@@ -224,7 +236,7 @@ class LinearRegression(sklearn_LinearRegression):
      def _onedal_supported(self, method_name, *data):
          if method_name == "fit":
              return self._onedal_fit_supported(method_name, *data)
-         if method_name == "predict":
+         if method_name in ["predict", "score"]:
              return self._onedal_predict_supported(method_name, *data)
          raise RuntimeError(f"Unknown method {method_name} in {self.__class__.__name__}")

@@ -286,6 +298,11 @@ class LinearRegression(sklearn_LinearRegression):
          res = self._onedal_estimator.predict(X, queue=queue)
          return res

+     def _onedal_score(self, X, y, sample_weight=None, queue=None):
+         return r2_score(
+             y, self._onedal_predict(X, queue=queue), sample_weight=sample_weight
+         )
+
      def get_coef_(self):
          return self.coef_

@@ -314,3 +331,4 @@ class LinearRegression(sklearn_LinearRegression):

      fit.__doc__ = sklearn_LinearRegression.fit.__doc__
      predict.__doc__ = sklearn_LinearRegression.predict.__doc__
+     score.__doc__ = sklearn_LinearRegression.score.__doc__
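The score method added here keeps scikit-learn's definition of the coefficient of determination; _onedal_score simply applies r2_score to the offloaded predictions. A small self-contained check of that R^2 definition (synthetic numbers, illustrative only, not part of the diff):

import numpy as np
from sklearn.metrics import r2_score

y_true = np.array([1.0, 2.0, 3.0, 4.0])
y_pred = np.array([1.1, 1.9, 3.2, 3.8])

u = ((y_true - y_pred) ** 2).sum()         # residual sum of squares
v = ((y_true - y_true.mean()) ** 2).sum()  # total sum of squares
assert np.isclose(1 - u / v, r2_score(y_true, y_pred))  # both give 0.98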
sklearnex/linear_model/logistic_regression.py
@@ -21,18 +21,6 @@ from daal4py.sklearn._utils import daal_check_version
  from daal4py.sklearn.linear_model.logistic_path import (
      LogisticRegression as LogisticRegression_daal4py,
  )
- from daal4py.sklearn.linear_model.logistic_path import daal4py_fit, daal4py_predict
-
-
- class BaseLogisticRegression(ABC):
-     def _save_attributes(self):
-         assert hasattr(self, "_onedal_estimator")
-         self.classes_ = self._onedal_estimator.classes_
-         self.coef_ = self._onedal_estimator.coef_
-         self.intercept_ = self._onedal_estimator.intercept_
-         self.n_features_in_ = self._onedal_estimator.n_features_in_
-         self.n_iter_ = self._onedal_estimator.n_iter_
-

  if daal_check_version((2024, "P", 1)):
      import numpy as np
@@ -44,6 +32,7 @@ if daal_check_version((2024, "P", 1)):

      from daal4py.sklearn._n_jobs_support import control_n_jobs
      from daal4py.sklearn._utils import sklearn_check_version
+     from daal4py.sklearn.linear_model.logistic_path import daal4py_fit, daal4py_predict
      from onedal.linear_model import LogisticRegression as onedal_LogisticRegression
      from onedal.utils import _num_samples

@@ -51,6 +40,15 @@ if daal_check_version((2024, "P", 1)):
      from .._utils import PatchingConditionsChain, get_patch_message
      from ..utils.validation import _assert_all_finite

+     class BaseLogisticRegression(ABC):
+         def _save_attributes(self):
+             assert hasattr(self, "_onedal_estimator")
+             self.classes_ = self._onedal_estimator.classes_
+             self.coef_ = self._onedal_estimator.coef_
+             self.intercept_ = self._onedal_estimator.intercept_
+             self.n_features_in_ = self._onedal_estimator.n_features_in_
+             self.n_iter_ = self._onedal_estimator.n_iter_
+
      @control_n_jobs(
          decorated_methods=[
              "fit",
@@ -82,7 +80,7 @@ if daal_check_version((2024, "P", 1)):
              random_state=None,
              solver="lbfgs",
              max_iter=100,
-             multi_class="auto",
+             multi_class="deprecated" if sklearn_check_version("1.5") else "auto",
              verbose=0,
              warm_start=False,
              n_jobs=None,
@@ -146,7 +144,7 @@ if daal_check_version((2024, "P", 1)):
              self._check_feature_names(X, reset=False)
              return dispatch(
                  self,
-                 "predict",
+                 "predict_proba",
                  {
                      "onedal": self.__class__._onedal_predict_proba,
                      "sklearn": sklearn_LogisticRegression.predict_proba,
@@ -160,7 +158,7 @@ if daal_check_version((2024, "P", 1)):
              self._check_feature_names(X, reset=False)
              return dispatch(
                  self,
-                 "predict",
+                 "predict_log_proba",
                  {
                      "onedal": self.__class__._onedal_predict_log_proba,
                      "sklearn": sklearn_LogisticRegression.predict_log_proba,