scikit-learn-intelex 2024.2.0__py38-none-manylinux1_x86_64.whl → 2024.4.0__py38-none-manylinux1_x86_64.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Note: this release of scikit-learn-intelex has been flagged as potentially problematic.
- {scikit_learn_intelex-2024.2.0.dist-info → scikit_learn_intelex-2024.4.0.dist-info}/METADATA +2 -2
- {scikit_learn_intelex-2024.2.0.dist-info → scikit_learn_intelex-2024.4.0.dist-info}/RECORD +45 -45
- sklearnex/__init__.py +9 -7
- sklearnex/_device_offload.py +31 -4
- sklearnex/basic_statistics/__init__.py +2 -1
- sklearnex/basic_statistics/incremental_basic_statistics.py +288 -0
- sklearnex/basic_statistics/tests/test_incremental_basic_statistics.py +386 -0
- sklearnex/cluster/dbscan.py +3 -1
- sklearnex/conftest.py +63 -0
- sklearnex/decomposition/pca.py +319 -1
- sklearnex/decomposition/tests/test_pca.py +34 -5
- sklearnex/dispatcher.py +74 -43
- sklearnex/ensemble/_forest.py +78 -89
- sklearnex/ensemble/tests/test_forest.py +15 -19
- sklearnex/linear_model/linear.py +275 -340
- sklearnex/linear_model/logistic_regression.py +63 -11
- sklearnex/linear_model/tests/test_linear.py +40 -5
- sklearnex/linear_model/tests/test_logreg.py +0 -2
- sklearnex/neighbors/_lof.py +74 -20
- sklearnex/neighbors/common.py +4 -1
- sklearnex/neighbors/knn_classification.py +44 -131
- sklearnex/neighbors/knn_regression.py +16 -126
- sklearnex/neighbors/knn_unsupervised.py +11 -86
- sklearnex/neighbors/tests/test_neighbors.py +0 -5
- sklearnex/preview/__init__.py +1 -1
- sklearnex/preview/cluster/k_means.py +5 -73
- sklearnex/preview/covariance/covariance.py +6 -5
- sklearnex/preview/covariance/tests/test_covariance.py +18 -5
- sklearnex/spmd/ensemble/forest.py +4 -12
- sklearnex/svm/_common.py +4 -7
- sklearnex/svm/nusvc.py +70 -50
- sklearnex/svm/nusvr.py +6 -52
- sklearnex/svm/svc.py +70 -51
- sklearnex/svm/svr.py +3 -49
- sklearnex/tests/_utils.py +164 -0
- sklearnex/tests/test_memory_usage.py +8 -3
- sklearnex/tests/test_monkeypatch.py +177 -149
- sklearnex/tests/test_n_jobs_support.py +8 -2
- sklearnex/tests/test_parallel.py +6 -8
- sklearnex/tests/test_patching.py +322 -87
- sklearnex/utils/__init__.py +2 -1
- sklearnex/utils/_namespace.py +97 -0
- sklearnex/preview/decomposition/__init__.py +0 -19
- sklearnex/preview/decomposition/pca.py +0 -374
- sklearnex/preview/decomposition/tests/test_preview_pca.py +0 -42
- sklearnex/tests/_models_info.py +0 -170
- sklearnex/tests/utils/_launch_algorithms.py +0 -118
- {scikit_learn_intelex-2024.2.0.dist-info → scikit_learn_intelex-2024.4.0.dist-info}/LICENSE.txt +0 -0
- {scikit_learn_intelex-2024.2.0.dist-info → scikit_learn_intelex-2024.4.0.dist-info}/WHEEL +0 -0
- {scikit_learn_intelex-2024.2.0.dist-info → scikit_learn_intelex-2024.4.0.dist-info}/top_level.txt +0 -0
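
Most of the churn in this release sits in the patching layer itself (sklearnex/dispatcher.py, sklearnex/_device_offload.py, and the per-estimator modules listed above). After upgrading, it can be worth confirming which estimators the installed sklearnex actually patches. A minimal sketch, assuming the patch_sklearn() and get_patch_names() helpers that sklearnex exports as its public API:

# Sketch only: enumerate patchable algorithms, then enable patching.
# Assumes sklearnex exposes get_patch_names() and patch_sklearn().
from sklearnex import get_patch_names, patch_sklearn

print(sorted(get_patch_names()))   # algorithms sklearnex reports it can accelerate

patch_sklearn()                    # re-map scikit-learn estimators to sklearnex versions

# Imports made after patch_sklearn() resolve to the accelerated implementations:
from sklearn.linear_model import LinearRegression

The full diff for sklearnex/linear_model/linear.py follows below.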
sklearnex/linear_model/linear.py
CHANGED
@@ -17,365 +17,300 @@
 import logging
 from abc import ABC
 
- …
-if daal_check_version((2023, "P", 100)):
-    import numpy as np
-    from sklearn.linear_model import LinearRegression as sklearn_LinearRegression
-
-    from daal4py.sklearn._n_jobs_support import control_n_jobs
-    from daal4py.sklearn._utils import get_dtype, make2d, sklearn_check_version
-
-    from .._device_offload import dispatch, wrap_output_data
-    from .._utils import (
-        PatchingConditionsChain,
-        get_patch_message,
-        register_hyperparameters,
-    )
-    from ..utils.validation import _assert_all_finite
+import numpy as np
+from sklearn.exceptions import NotFittedError
+from sklearn.linear_model import LinearRegression as sklearn_LinearRegression
+
+from daal4py.sklearn._n_jobs_support import control_n_jobs
+from daal4py.sklearn._utils import sklearn_check_version
+
+from .._device_offload import dispatch, wrap_output_data
+from .._utils import PatchingConditionsChain, get_patch_message, register_hyperparameters
+from ..utils.validation import _assert_all_finite
+
+if sklearn_check_version("1.0") and not sklearn_check_version("1.2"):
+    from sklearn.linear_model._base import _deprecate_normalize
+
+from scipy.sparse import issparse
+from sklearn.utils.validation import check_X_y
+
+from onedal.common.hyperparameters import get_hyperparameters
+from onedal.linear_model import LinearRegression as onedal_LinearRegression
+from onedal.utils import _num_features, _num_samples
+
+
+@register_hyperparameters({"fit": get_hyperparameters("linear_regression", "train")})
+@control_n_jobs(decorated_methods=["fit", "predict"])
+class LinearRegression(sklearn_LinearRegression):
+    __doc__ = sklearn_LinearRegression.__doc__
+
+    if sklearn_check_version("1.2"):
+        _parameter_constraints: dict = {**sklearn_LinearRegression._parameter_constraints}
+
+        def __init__(
+            self,
+            fit_intercept=True,
+            copy_X=True,
+            n_jobs=None,
+            positive=False,
+        ):
+            super().__init__(
+                fit_intercept=fit_intercept,
+                copy_X=copy_X,
+                n_jobs=n_jobs,
+                positive=positive,
+            )
 
- …
+    else:
+
+        def __init__(
+            self,
+            fit_intercept=True,
+            normalize="deprecated" if sklearn_check_version("1.0") else False,
+            copy_X=True,
+            n_jobs=None,
+            positive=False,
+        ):
+            super().__init__(
+                fit_intercept=fit_intercept,
+                normalize=normalize,
+                copy_X=copy_X,
+                n_jobs=n_jobs,
+                positive=positive,
+            )
 
- …
+    def fit(self, X, y, sample_weight=None):
+        if sklearn_check_version("1.0"):
+            self._check_feature_names(X, reset=True)
+        if sklearn_check_version("1.2"):
+            self._validate_params()
+
+        # It is necessary to properly update coefs for predict if we
+        # fallback to sklearn in dispatch
+        if hasattr(self, "_onedal_estimator"):
+            del self._onedal_estimator
+
+        dispatch(
+            self,
+            "fit",
+            {
+                "onedal": self.__class__._onedal_fit,
+                "sklearn": sklearn_LinearRegression.fit,
+            },
+            X,
+            y,
+            sample_weight,
+        )
+        return self
+
+    @wrap_output_data
+    def predict(self, X):
+
+        if not hasattr(self, "coef_"):
+            msg = (
+                "This %(name)s instance is not fitted yet. Call 'fit' with "
+                "appropriate arguments before using this estimator."
+            )
+            raise NotFittedError(msg % {"name": self.__class__.__name__})
+
+        return dispatch(
+            self,
+            "predict",
+            {
+                "onedal": self.__class__._onedal_predict,
+                "sklearn": sklearn_LinearRegression.predict,
+            },
+            X,
+        )
+
+    def _test_type_and_finiteness(self, X_in):
+        X = X_in if isinstance(X_in, np.ndarray) else np.asarray(X_in)
+
+        dtype = X.dtype
+        if "complex" in str(type(dtype)):
+            return False
+
+        try:
+            _assert_all_finite(X)
+        except BaseException:
+            return False
+        return True
+
+    def _onedal_fit_supported(self, method_name, *data):
+        assert method_name == "fit"
+        assert len(data) == 3
+        X, y, sample_weight = data
+
+        class_name = self.__class__.__name__
+        patching_status = PatchingConditionsChain(
+            f"sklearn.linear_model.{class_name}.fit"
+        )
+
+        normalize_is_set = (
+            hasattr(self, "normalize")
+            and self.normalize
+            and self.normalize != "deprecated"
+        )
+        positive_is_set = hasattr(self, "positive") and self.positive
+
+        n_samples = _num_samples(X)
+        n_features = _num_features(X, fallback_1d=True)
+
+        # Check if equations are well defined
+        is_good_for_onedal = n_samples >= (n_features + int(self.fit_intercept))
+
+        dal_ready = patching_status.and_conditions(
+            [
+                (sample_weight is None, "Sample weight is not supported."),
+                (
+                    not issparse(X) and not issparse(y),
+                    "Sparse input is not supported.",
+                ),
+                (not normalize_is_set, "Normalization is not supported."),
+                (
+                    not positive_is_set,
+                    "Forced positive coefficients are not supported.",
+                ),
+                (
+                    is_good_for_onedal,
+                    "The shape of X (fitting) does not satisfy oneDAL requirements:"
+                    "Number of features + 1 >= number of samples.",
+                ),
+            ]
+        )
+        if not dal_ready:
+            return patching_status
 
- …
+        if not patching_status.and_condition(
+            self._test_type_and_finiteness(X), "Input X is not supported."
+        ):
+            return patching_status
 
- …
+        patching_status.and_condition(
+            self._test_type_and_finiteness(y), "Input y is not supported."
+        )
+
+        return patching_status
+
+    def _onedal_predict_supported(self, method_name, *data):
+        assert method_name == "predict"
+        assert len(data) == 1
+
+        class_name = self.__class__.__name__
+        patching_status = PatchingConditionsChain(
+            f"sklearn.linear_model.{class_name}.predict"
+        )
+
+        n_samples = _num_samples(*data)
+        model_is_sparse = issparse(self.coef_) or (
+            self.fit_intercept and issparse(self.intercept_)
+        )
+        dal_ready = patching_status.and_conditions(
+            [
+                (n_samples > 0, "Number of samples is less than 1."),
+                (not issparse(*data), "Sparse input is not supported."),
+                (not model_is_sparse, "Sparse coefficients are not supported."),
+            ]
+        )
+        if not dal_ready:
+            return patching_status
 
+        patching_status.and_condition(
+            self._test_type_and_finiteness(*data), "Input X is not supported."
+        )
+
+        return patching_status
+
+    def _onedal_supported(self, method_name, *data):
+        if method_name == "fit":
+            return self._onedal_fit_supported(method_name, *data)
+        if method_name == "predict":
+            return self._onedal_predict_supported(method_name, *data)
+        raise RuntimeError(f"Unknown method {method_name} in {self.__class__.__name__}")
+
+    _onedal_gpu_supported = _onedal_supported
+    _onedal_cpu_supported = _onedal_supported
+
+    def _initialize_onedal_estimator(self):
+        onedal_params = {"fit_intercept": self.fit_intercept, "copy_X": self.copy_X}
+        self._onedal_estimator = onedal_LinearRegression(**onedal_params)
+
+    def _onedal_fit(self, X, y, sample_weight, queue=None):
+        assert sample_weight is None
+
+        check_params = {
+            "X": X,
+            "y": y,
+            "dtype": [np.float64, np.float32],
+            "accept_sparse": ["csr", "csc", "coo"],
+            "y_numeric": True,
+            "multi_output": True,
+            "force_all_finite": False,
+        }
         if sklearn_check_version("1.2"):
- …
-                **sklearn_LinearRegression._parameter_constraints
-            }
-
-            def __init__(
-                self,
-                fit_intercept=True,
-                copy_X=True,
-                n_jobs=None,
-                positive=False,
-            ):
-                super().__init__(
-                    fit_intercept=fit_intercept,
-                    copy_X=copy_X,
-                    n_jobs=n_jobs,
-                    positive=positive,
-                )
-
-        elif sklearn_check_version("0.24"):
-
-            def __init__(
-                self,
-                fit_intercept=True,
-                normalize="deprecated" if sklearn_check_version("1.0") else False,
-                copy_X=True,
-                n_jobs=None,
-                positive=False,
-            ):
-                super().__init__(
-                    fit_intercept=fit_intercept,
-                    normalize=normalize,
-                    copy_X=copy_X,
-                    n_jobs=n_jobs,
-                    positive=positive,
-                )
-
+            X, y = self._validate_data(**check_params)
         else:
+            X, y = check_X_y(**check_params)
 
- …
-                n_jobs=None,
-            ):
-                super().__init__(
-                    fit_intercept=fit_intercept,
-                    normalize=normalize,
-                    copy_X=copy_X,
-                    n_jobs=n_jobs,
-                )
-
-        def fit(self, X, y, sample_weight=None):
-            """
-            Fit linear model.
-            Parameters
-            ----------
-            X : {array-like, sparse matrix} of shape (n_samples, n_features)
-                Training data.
-            y : array-like of shape (n_samples,) or (n_samples, n_targets)
-                Target values. Will be cast to X's dtype if necessary.
-            sample_weight : array-like of shape (n_samples,), default=None
-                Individual weights for each sample.
-                .. versionadded:: 0.17
-                   parameter *sample_weight* support to LinearRegression.
-            Returns
-            -------
-            self : object
-                Fitted Estimator.
-            """
-            if sklearn_check_version("1.0"):
-                self._check_feature_names(X, reset=True)
-            if sklearn_check_version("1.2"):
-                self._validate_params()
-
-            dispatch(
-                self,
-                "fit",
-                {
-                    "onedal": self.__class__._onedal_fit,
-                    "sklearn": sklearn_LinearRegression.fit,
-                },
-                X,
-                y,
-                sample_weight,
-            )
-            return self
-
-        @wrap_output_data
-        def predict(self, X):
-            """
-            Predict using the linear model.
-            Parameters
-            ----------
-            X : array-like or sparse matrix, shape (n_samples, n_features)
-                Samples.
-            Returns
-            -------
-            C : array, shape (n_samples, n_targets)
-                Returns predicted values.
-            """
-            if sklearn_check_version("1.0"):
-                self._check_feature_names(X, reset=False)
-            return dispatch(
-                self,
-                "predict",
-                {
-                    "onedal": self.__class__._onedal_predict,
-                    "sklearn": sklearn_LinearRegression.predict,
-                },
-                X,
+        if sklearn_check_version("1.0") and not sklearn_check_version("1.2"):
+            self._normalize = _deprecate_normalize(
+                self.normalize,
+                default=False,
+                estimator_name=self.__class__.__name__,
             )
 
- …
-            if "complex" in str(type(dtype)):
-                return False
-
-            try:
-                _assert_all_finite(X)
-            except BaseException:
-                return False
-            return True
-
-        def _onedal_fit_supported(self, method_name, *data):
-            assert method_name == "fit"
-            assert len(data) == 3
-            X, y, sample_weight = data
+        self._initialize_onedal_estimator()
+        try:
+            self._onedal_estimator.fit(X, y, queue=queue)
+            self._save_attributes()
 
- …
+        except RuntimeError:
+            logging.getLogger("sklearnex").info(
+                f"{self.__class__.__name__}.fit "
+                + get_patch_message("sklearn_after_onedal")
            )
 
- …
-                and self.normalize
-                and self.normalize != "deprecated"
-            )
-            positive_is_set = hasattr(self, "positive") and self.positive
-
-            n_samples = _num_samples(X)
-            n_features = _num_features(X, fallback_1d=True)
-
-            # Check if equations are well defined
-            is_good_for_onedal = n_samples > (n_features + int(self.fit_intercept))
-
-            dal_ready = patching_status.and_conditions(
-                [
-                    (sample_weight is None, "Sample weight is not supported."),
-                    (
-                        not issparse(X) and not issparse(y),
-                        "Sparse input is not supported.",
-                    ),
-                    (not normalize_is_set, "Normalization is not supported."),
-                    (
-                        not positive_is_set,
-                        "Forced positive coefficients are not supported.",
-                    ),
-                    (
-                        is_good_for_onedal,
-                        "The shape of X (fitting) does not satisfy oneDAL requirements:."
-                        "Number of features + 1 >= number of samples.",
-                    ),
-                ]
-            )
-            if not dal_ready:
-                return patching_status
-
-            if not patching_status.and_condition(
-                self._test_type_and_finiteness(X), "Input X is not supported."
-            ):
-                return patching_status
+            del self._onedal_estimator
+            super().fit(X, y)
 
- …
-            )
-
-            return patching_status
+    def _onedal_predict(self, X, queue=None):
+        if sklearn_check_version("1.0"):
+            self._check_feature_names(X, reset=False)
 
- …
+        X = self._validate_data(X, accept_sparse=False, reset=False)
+        if not hasattr(self, "_onedal_estimator"):
+            self._initialize_onedal_estimator()
+            self._onedal_estimator.coef_ = self.coef_
+            self._onedal_estimator.intercept_ = self.intercept_
 
- …
-                f"sklearn.linear_model.{class_name}.predict"
-            )
+        res = self._onedal_estimator.predict(X, queue=queue)
+        return res
 
- …
-                self.fit_intercept and issparse(self.intercept_)
-            )
-            dal_ready = patching_status.and_conditions(
-                [
-                    (n_samples > 0, "Number of samples is less than 1."),
-                    (not issparse(*data), "Sparse input is not supported."),
-                    (not model_is_sparse, "Sparse coefficients are not supported."),
-                    (hasattr(self, "_onedal_estimator"), "oneDAL model was not trained."),
-                ]
-            )
-            if not dal_ready:
-                return patching_status
+    def get_coef_(self):
+        return self.coef_
 
- …
+    def set_coef_(self, value):
+        self.__dict__["coef_"] = value
+        if hasattr(self, "_onedal_estimator"):
+            self._onedal_estimator.coef_ = value
+            del self._onedal_estimator._onedal_model
 
- …
+    def get_intercept_(self):
+        return self.intercept_
 
- …
-            raise RuntimeError(
-                f"Unknown method {method_name} in {self.__class__.__name__}"
-            )
+    def set_intercept_(self, value):
+        self.__dict__["intercept_"] = value
+        if hasattr(self, "_onedal_estimator"):
+            self._onedal_estimator.intercept_ = value
+            del self._onedal_estimator._onedal_model
 
- …
-            onedal_params = {"fit_intercept": self.fit_intercept, "copy_X": self.copy_X}
-            self._onedal_estimator = onedal_LinearRegression(**onedal_params)
-
-        def _onedal_fit(self, X, y, sample_weight, queue=None):
-            assert sample_weight is None
-
-            check_params = {
-                "X": X,
-                "y": y,
-                "dtype": [np.float64, np.float32],
-                "accept_sparse": ["csr", "csc", "coo"],
-                "y_numeric": True,
-                "multi_output": True,
-                "force_all_finite": False,
-            }
-            if sklearn_check_version("1.2"):
-                X, y = self._validate_data(**check_params)
-            else:
-                X, y = check_X_y(**check_params)
-
-            if sklearn_check_version("1.0") and not sklearn_check_version("1.2"):
-                self._normalize = _deprecate_normalize(
-                    self.normalize,
-                    default=False,
-                    estimator_name=self.__class__.__name__,
-                )
+    def _save_attributes(self):
+        self.coef_ = property(self.get_coef_, self.set_coef_)
+        self.intercept_ = property(self.get_intercept_, self.set_intercept_)
+        self.n_features_in_ = self._onedal_estimator.n_features_in_
+        self._sparse = False
+        self.__dict__["coef_"] = self._onedal_estimator.coef_
+        self.__dict__["intercept_"] = self._onedal_estimator.intercept_
 
- …
-                self._onedal_estimator.fit(X, y, queue=queue)
-                self._save_attributes()
-
-            except RuntimeError:
-                logging.getLogger("sklearnex").info(
-                    f"{self.__class__.__name__}.fit "
-                    + get_patch_message("sklearn_after_onedal")
-                )
-
-                del self._onedal_estimator
-                super().fit(X, y)
-
-        def _onedal_predict(self, X, queue=None):
-            X = self._validate_data(X, accept_sparse=False, reset=False)
-            if not hasattr(self, "_onedal_estimator"):
-                self._initialize_onedal_estimator()
-                self._onedal_estimator.coef_ = self.coef_
-                self._onedal_estimator.intercept_ = self.intercept_
-
-            return self._onedal_estimator.predict(X, queue=queue)
-
-else:
-    from daal4py.sklearn.linear_model import LinearRegression
-
-    logging.warning(
-        "Sklearnex LinearRegression requires oneDAL version >= 2023.1 "
-        "but it was not found"
-    )
+    fit.__doc__ = sklearn_LinearRegression.fit.__doc__
+    predict.__doc__ = sklearn_LinearRegression.predict.__doc__

(Removed lines shown as "…" were not captured in this rendering of the diff.)