scikit-learn-intelex 2024.0.1__py311-none-manylinux1_x86_64.whl → 2024.4.0__py311-none-manylinux1_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of scikit-learn-intelex might be problematic.
- {scikit_learn_intelex-2024.0.1.dist-info → scikit_learn_intelex-2024.4.0.dist-info}/METADATA +2 -2
- scikit_learn_intelex-2024.4.0.dist-info/RECORD +101 -0
- sklearnex/__init__.py +11 -7
- sklearnex/__main__.py +0 -1
- sklearnex/_device_offload.py +31 -4
- sklearnex/_utils.py +15 -1
- sklearnex/basic_statistics/__init__.py +2 -2
- sklearnex/basic_statistics/incremental_basic_statistics.py +288 -0
- sklearnex/basic_statistics/tests/test_incremental_basic_statistics.py +386 -0
- sklearnex/cluster/__init__.py +0 -1
- sklearnex/cluster/dbscan.py +5 -2
- sklearnex/cluster/k_means.py +0 -1
- sklearnex/cluster/tests/test_dbscan.py +0 -1
- sklearnex/cluster/tests/test_kmeans.py +0 -1
- sklearnex/conftest.py +63 -0
- sklearnex/covariance/__init__.py +19 -0
- sklearnex/covariance/incremental_covariance.py +130 -0
- sklearnex/covariance/tests/test_incremental_covariance.py +143 -0
- sklearnex/decomposition/__init__.py +0 -1
- sklearnex/decomposition/pca.py +319 -2
- sklearnex/decomposition/tests/test_pca.py +34 -6
- sklearnex/dispatcher.py +93 -28
- sklearnex/ensemble/__init__.py +0 -1
- sklearnex/ensemble/_forest.py +93 -89
- sklearnex/ensemble/tests/test_forest.py +15 -20
- sklearnex/glob/__main__.py +0 -1
- sklearnex/glob/dispatcher.py +0 -1
- sklearnex/linear_model/__init__.py +1 -3
- sklearnex/linear_model/coordinate_descent.py +0 -1
- sklearnex/linear_model/linear.py +275 -332
- sklearnex/linear_model/logistic_path.py +0 -1
- sklearnex/linear_model/logistic_regression.py +385 -0
- sklearnex/linear_model/ridge.py +0 -1
- sklearnex/linear_model/tests/test_linear.py +47 -7
- sklearnex/linear_model/tests/test_logreg.py +70 -8
- sklearnex/manifold/__init__.py +0 -1
- sklearnex/manifold/t_sne.py +0 -1
- sklearnex/manifold/tests/test_tsne.py +0 -1
- sklearnex/metrics/__init__.py +0 -1
- sklearnex/metrics/pairwise.py +0 -1
- sklearnex/metrics/ranking.py +0 -1
- sklearnex/metrics/tests/test_metrics.py +0 -1
- sklearnex/model_selection/__init__.py +0 -1
- sklearnex/model_selection/split.py +0 -1
- sklearnex/model_selection/tests/test_model_selection.py +0 -1
- sklearnex/neighbors/__init__.py +1 -2
- sklearnex/neighbors/_lof.py +221 -0
- sklearnex/neighbors/common.py +5 -3
- sklearnex/neighbors/knn_classification.py +47 -133
- sklearnex/neighbors/knn_regression.py +20 -129
- sklearnex/neighbors/knn_unsupervised.py +15 -89
- sklearnex/neighbors/tests/test_neighbors.py +12 -17
- sklearnex/preview/__init__.py +1 -2
- sklearnex/preview/cluster/__init__.py +0 -1
- sklearnex/preview/cluster/k_means.py +7 -74
- sklearnex/preview/{decomposition → covariance}/__init__.py +19 -20
- sklearnex/preview/covariance/covariance.py +133 -0
- sklearnex/preview/covariance/tests/test_covariance.py +66 -0
- sklearnex/spmd/__init__.py +1 -0
- sklearnex/spmd/covariance/__init__.py +19 -0
- sklearnex/spmd/covariance/covariance.py +21 -0
- sklearnex/spmd/ensemble/forest.py +4 -12
- sklearnex/spmd/linear_model/__init__.py +2 -1
- sklearnex/spmd/linear_model/logistic_regression.py +21 -0
- sklearnex/svm/__init__.py +0 -1
- sklearnex/svm/_common.py +4 -7
- sklearnex/svm/nusvc.py +73 -49
- sklearnex/svm/nusvr.py +8 -52
- sklearnex/svm/svc.py +74 -51
- sklearnex/svm/svr.py +5 -49
- sklearnex/svm/tests/test_svm.py +0 -1
- sklearnex/tests/_utils.py +164 -0
- sklearnex/tests/test_memory_usage.py +9 -7
- sklearnex/tests/test_monkeypatch.py +192 -134
- sklearnex/tests/test_n_jobs_support.py +99 -0
- sklearnex/tests/test_parallel.py +6 -8
- sklearnex/tests/test_patching.py +338 -89
- sklearnex/utils/__init__.py +2 -1
- sklearnex/utils/_namespace.py +97 -0
- sklearnex/utils/validation.py +0 -1
- scikit_learn_intelex-2024.0.1.dist-info/RECORD +0 -90
- sklearnex/neighbors/lof.py +0 -437
- sklearnex/preview/decomposition/pca.py +0 -376
- sklearnex/preview/decomposition/tests/test_preview_pca.py +0 -38
- sklearnex/tests/_models_info.py +0 -170
- sklearnex/tests/utils/_launch_algorithms.py +0 -118
- {scikit_learn_intelex-2024.0.1.dist-info → scikit_learn_intelex-2024.4.0.dist-info}/LICENSE.txt +0 -0
- {scikit_learn_intelex-2024.0.1.dist-info → scikit_learn_intelex-2024.4.0.dist-info}/WHEEL +0 -0
- {scikit_learn_intelex-2024.0.1.dist-info → scikit_learn_intelex-2024.4.0.dist-info}/top_level.txt +0 -0
sklearnex/spmd/ensemble/forest.py CHANGED

@@ -14,8 +14,6 @@
 # limitations under the License.
 # ==============================================================================
 
-from abc import ABC
-
 from onedal.spmd.ensemble import RandomForestClassifier as onedal_RandomForestClassifier
 from onedal.spmd.ensemble import RandomForestRegressor as onedal_RandomForestRegressor
 
@@ -23,16 +21,9 @@ from ...ensemble import RandomForestClassifier as RandomForestClassifier_Batch
 from ...ensemble import RandomForestRegressor as RandomForestRegressor_Batch
 
 
-class BaseForestSPMD(ABC):
-    def _onedal_classifier(self, **onedal_params):
-        return onedal_RandomForestClassifier(**onedal_params)
-
-    def _onedal_regressor(self, **onedal_params):
-        return onedal_RandomForestRegressor(**onedal_params)
-
-
-class RandomForestClassifier(BaseForestSPMD, RandomForestClassifier_Batch):
+class RandomForestClassifier(RandomForestClassifier_Batch):
     __doc__ = RandomForestClassifier_Batch.__doc__
+    _onedal_factory = onedal_RandomForestClassifier
 
     def _onedal_cpu_supported(self, method_name, *data):
         # TODO:
@@ -55,8 +46,9 @@ class RandomForestClassifier(BaseForestSPMD, RandomForestClassifier_Batch):
         return ready
 
 
-class RandomForestRegressor(BaseForestSPMD, RandomForestRegressor_Batch):
+class RandomForestRegressor(RandomForestRegressor_Batch):
     __doc__ = RandomForestRegressor_Batch.__doc__
+    _onedal_factory = onedal_RandomForestRegressor
 
     def _onedal_cpu_supported(self, method_name, *data):
         # TODO:
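These hunks drop the BaseForestSPMD mixin (which overrode `_onedal_classifier`/`_onedal_regressor`) in favour of a `_onedal_factory` class attribute that points at the SPMD backend class. A generic sketch of that pattern, with hypothetical names rather than sklearnex internals, showing why a subclass now only needs to override the attribute:

# Illustrative sketch of the class-attribute factory pattern used above;
# the names BatchEstimator/SPMDBackend/SPMDEstimator are hypothetical.
class BatchEstimator:
    _onedal_factory = None  # batch backend class would go here

    def _make_backend(self, **params):
        # shared code constructs whatever backend the subclass selected
        return self._onedal_factory(**params)


class SPMDBackend:
    def __init__(self, **params):
        self.params = params


class SPMDEstimator(BatchEstimator):
    _onedal_factory = SPMDBackend  # swap in the distributed backend, nothing else changes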
sklearnex/spmd/linear_model/__init__.py CHANGED

@@ -15,5 +15,6 @@
 # ==============================================================================
 
 from .linear_model import LinearRegression
+from .logistic_regression import LogisticRegression
 
-__all__ = ["LinearRegression"]
+__all__ = ["LinearRegression", "LogisticRegression"]
sklearnex/spmd/linear_model/logistic_regression.py ADDED

@@ -0,0 +1,21 @@
+# ==============================================================================
+# Copyright 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+from onedal.spmd.linear_model import LogisticRegression
+
+# TODO:
+# Currently it uses `onedal` module interface.
+# Add sklearnex dispatching.
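The new module is a thin re-export of the oneDAL SPMD estimator, with no sklearnex dispatching yet (per its own TODO). A minimal sketch of what the re-export makes available; actually running the estimator in a distributed setting additionally needs an SPMD-enabled build, an MPI launcher, and device queues, none of which are shown here:

# Minimal sketch: both imports resolve to the same SPMD estimator class,
# since sklearnex.spmd.linear_model simply re-exports the onedal one.
from onedal.spmd.linear_model import LogisticRegression as onedal_spmd_LogReg
from sklearnex.spmd.linear_model import LogisticRegression

assert LogisticRegression is onedal_spmd_LogReg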
sklearnex/svm/__init__.py CHANGED
sklearnex/svm/_common.py CHANGED
@@ -76,7 +76,7 @@ class BaseSVM(ABC):
         inference_methods = (
             ["predict"]
             if class_name.endswith("R")
-            else ["predict", "predict_proba", "decision_function"]
+            else ["predict", "predict_proba", "decision_function", "score"]
         )
         if method_name in inference_methods:
             patching_status.and_conditions(

@@ -111,12 +111,9 @@ class BaseSVC(BaseSVM):
             cv = StratifiedKFold(
                 n_splits=n_splits, shuffle=True, random_state=self.random_state
             )
-
-
-
-                )
-            else:
-                self.clf_prob = CalibratedClassifierCV(clf_base, cv=cv, method="sigmoid")
+            self.clf_prob = CalibratedClassifierCV(
+                clf_base, ensemble=False, cv=cv, method="sigmoid", n_jobs=n_jobs
+            )
             self.clf_prob.fit(X, y, sample_weight)
         except ValueError:
             clf_base = clf_base.fit(X, y, sample_weight)
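The probability path now builds a single calibrated model with `ensemble=False` and parallelizes the calibration folds via `n_jobs`, instead of branching between call signatures. A standalone sketch of that scikit-learn pattern with a plain SVC and toy data (illustrative only, not sklearnex internals):

# Standalone sketch of the calibration pattern now used in BaseSVC;
# the estimator and data here are illustrative.
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_classification
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC

X, y = make_classification(n_samples=200, random_state=0)
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)

# ensemble=False calibrates one final estimator refit on all the data,
# rather than keeping one calibrated copy per fold; n_jobs runs the folds in parallel.
clf_prob = CalibratedClassifierCV(
    SVC(), ensemble=False, cv=cv, method="sigmoid", n_jobs=-1
)
clf_prob.fit(X, y)
print(clf_prob.predict_proba(X[:3]))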
sklearnex/svm/nusvc.py CHANGED

@@ -14,11 +14,15 @@
 # limitations under the License.
 # ==============================================================================
 
+import numpy as np
 from sklearn.exceptions import NotFittedError
+from sklearn.metrics import accuracy_score
 from sklearn.svm import NuSVC as sklearn_NuSVC
 from sklearn.utils.validation import _deprecate_positional_args
 
+from daal4py.sklearn._n_jobs_support import control_n_jobs
 from daal4py.sklearn._utils import sklearn_check_version
+from sklearnex.utils import get_namespace
 
 from .._device_offload import dispatch, wrap_output_data
 from ._common import BaseSVC

@@ -29,6 +33,9 @@ if sklearn_check_version("1.0"):
 from onedal.svm import NuSVC as onedal_NuSVC
 
 
+@control_n_jobs(
+    decorated_methods=["fit", "predict", "_predict_proba", "decision_function", "score"]
+)
 class NuSVC(sklearn_NuSVC, BaseSVC):
     __doc__ = sklearn_NuSVC.__doc__
 

@@ -74,39 +81,6 @@ class NuSVC(sklearn_NuSVC, BaseSVC):
         )
 
     def fit(self, X, y, sample_weight=None):
-        """
-        Fit the SVM model according to the given training data.
-
-        Parameters
-        ----------
-        X : {array-like, sparse matrix} of shape (n_samples, n_features) \
-                or (n_samples, n_samples)
-            Training vectors, where `n_samples` is the number of samples
-            and `n_features` is the number of features.
-            For kernel="precomputed", the expected shape of X is
-            (n_samples, n_samples).
-
-        y : array-like of shape (n_samples,)
-            Target values (class labels in classification, real numbers in
-            regression).
-
-        sample_weight : array-like of shape (n_samples,), default=None
-            Per-sample weights. Rescale C per sample. Higher weights
-            force the classifier to put more emphasis on these points.
-
-        Returns
-        -------
-        self : object
-            Fitted estimator.
-
-        Notes
-        -----
-        If X and y are not C-ordered and contiguous arrays of np.float64 and
-        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
-
-        If X is a dense array, then the other methods will not support sparse
-        matrices as input.
-        """
         if sklearn_check_version("1.2"):
             self._validate_params()
         if sklearn_check_version("1.0"):

@@ -127,22 +101,6 @@ class NuSVC(sklearn_NuSVC, BaseSVC):
 
     @wrap_output_data
     def predict(self, X):
-        """
-        Perform regression on samples in X.
-
-        For an one-class model, +1 (inlier) or -1 (outlier) is returned.
-
-        Parameters
-        ----------
-        X : {array-like, sparse matrix} of shape (n_samples, n_features)
-            For kernel="precomputed", the expected shape of X is
-            (n_samples_test, n_samples_train).
-
-        Returns
-        -------
-        y_pred : ndarray of shape (n_samples,)
-            The predicted values.
-        """
         if sklearn_check_version("1.0"):
             self._check_feature_names(X, reset=False)
         return dispatch(

@@ -155,6 +113,22 @@ class NuSVC(sklearn_NuSVC, BaseSVC):
             X,
         )
 
+    @wrap_output_data
+    def score(self, X, y, sample_weight=None):
+        if sklearn_check_version("1.0"):
+            self._check_feature_names(X, reset=False)
+        return dispatch(
+            self,
+            "score",
+            {
+                "onedal": self.__class__._onedal_score,
+                "sklearn": sklearn_NuSVC.score,
+            },
+            X,
+            y,
+            sample_weight=sample_weight,
+        )
+
     if sklearn_check_version("1.0"):
 
         @available_if(sklearn_NuSVC._check_proba)

@@ -187,6 +161,38 @@ class NuSVC(sklearn_NuSVC, BaseSVC):
             """
             return self._predict_proba(X)
 
+        @available_if(sklearn_NuSVC._check_proba)
+        def predict_log_proba(self, X):
+            """Compute log probabilities of possible outcomes for samples in X.
+
+            The model need to have probability information computed at training
+            time: fit with attribute `probability` set to True.
+
+            Parameters
+            ----------
+            X : array-like of shape (n_samples, n_features) or \
+                    (n_samples_test, n_samples_train)
+                For kernel="precomputed", the expected shape of X is
+                (n_samples_test, n_samples_train).
+
+            Returns
+            -------
+            T : ndarray of shape (n_samples, n_classes)
+                Returns the log-probabilities of the sample for each class in
+                the model. The columns correspond to the classes in sorted
+                order, as they appear in the attribute :term:`classes_`.
+
+            Notes
+            -----
+            The probability model is created using cross validation, so
+            the results can be slightly different than those obtained by
+            predict. Also, it will produce meaningless results on very small
+            datasets.
+            """
+            xp, _ = get_namespace(X)
+
+            return xp.log(self.predict_proba(X))
+
     else:
 
         @property

@@ -194,6 +200,12 @@ class NuSVC(sklearn_NuSVC, BaseSVC):
             self._check_proba()
             return self._predict_proba
 
+        def _predict_log_proba(self, X):
+            xp, _ = get_namespace(X)
+            return xp.log(self.predict_proba(X))
+
+        predict_proba.__doc__ = sklearn_NuSVC.predict_proba.__doc__
+
     @wrap_output_data
     def _predict_proba(self, X):
         if sklearn_check_version("1.0"):

@@ -228,6 +240,8 @@ class NuSVC(sklearn_NuSVC, BaseSVC):
             X,
         )
 
+    decision_function.__doc__ = sklearn_NuSVC.decision_function.__doc__
+
     def _onedal_fit(self, X, y, sample_weight=None, queue=None):
         onedal_params = {
             "nu": self.nu,

@@ -270,3 +284,13 @@ class NuSVC(sklearn_NuSVC, BaseSVC):
 
     def _onedal_decision_function(self, X, queue=None):
         return self._onedal_estimator.decision_function(X, queue=queue)
+
+    def _onedal_score(self, X, y, sample_weight=None, queue=None):
+        return accuracy_score(
+            y, self._onedal_predict(X, queue=queue), sample_weight=sample_weight
+        )
+
+    fit.__doc__ = sklearn_NuSVC.fit.__doc__
+    predict.__doc__ = sklearn_NuSVC.predict.__doc__
+    decision_function.__doc__ = sklearn_NuSVC.decision_function.__doc__
+    score.__doc__ = sklearn_NuSVC.score.__doc__
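With these hunks, NuSVC gains a dispatched `score` and, on scikit-learn >= 1.0, a `predict_log_proba` that simply takes the log of `predict_proba` through the array-namespace helper. A hedged usage sketch with toy data (illustrative only):

# Usage sketch for the new methods; data and parameters are illustrative.
from sklearn.datasets import make_classification
from sklearnex.svm import NuSVC

X, y = make_classification(n_samples=200, n_informative=4, random_state=0)
clf = NuSVC(probability=True).fit(X, y)

acc = clf.score(X, y)             # dispatched to _onedal_score when supported
log_p = clf.predict_log_proba(X)  # log of predict_proba via the array namespace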
sklearnex/svm/nusvr.py CHANGED

@@ -17,6 +17,7 @@
 from sklearn.svm import NuSVR as sklearn_NuSVR
 from sklearn.utils.validation import _deprecate_positional_args
 
+from daal4py.sklearn._n_jobs_support import control_n_jobs
 from daal4py.sklearn._utils import sklearn_check_version
 from onedal.svm import NuSVR as onedal_NuSVR
 

@@ -24,6 +25,7 @@ from .._device_offload import dispatch, wrap_output_data
 from ._common import BaseSVR
 
 
+@control_n_jobs(decorated_methods=["fit", "predict"])
 class NuSVR(sklearn_NuSVR, BaseSVR):
     __doc__ = sklearn_NuSVR.__doc__
 

@@ -34,14 +36,14 @@ class NuSVR(sklearn_NuSVR, BaseSVR):
     def __init__(
         self,
         *,
+        nu=0.5,
+        C=1.0,
         kernel="rbf",
         degree=3,
         gamma="scale",
         coef0=0.0,
-        tol=1e-3,
-        C=1.0,
-        nu=0.5,
         shrinking=True,
+        tol=1e-3,
         cache_size=200,
         verbose=False,
         max_iter=-1,

@@ -61,39 +63,6 @@ class NuSVR(sklearn_NuSVR, BaseSVR):
         )
 
     def fit(self, X, y, sample_weight=None):
-        """
-        Fit the SVM model according to the given training data.
-
-        Parameters
-        ----------
-        X : {array-like, sparse matrix} of shape (n_samples, n_features) \
-                or (n_samples, n_samples)
-            Training vectors, where `n_samples` is the number of samples
-            and `n_features` is the number of features.
-            For kernel="precomputed", the expected shape of X is
-            (n_samples, n_samples).
-
-        y : array-like of shape (n_samples,)
-            Target values (class labels in classification, real numbers in
-            regression).
-
-        sample_weight : array-like of shape (n_samples,), default=None
-            Per-sample weights. Rescale C per sample. Higher weights
-            force the classifier to put more emphasis on these points.
-
-        Returns
-        -------
-        self : object
-            Fitted estimator.
-
-        Notes
-        -----
-        If X and y are not C-ordered and contiguous arrays of np.float64 and
-        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
-
-        If X is a dense array, then the other methods will not support sparse
-        matrices as input.
-        """
         if sklearn_check_version("1.2"):
             self._validate_params()
         if sklearn_check_version("1.0"):

@@ -113,22 +82,6 @@ class NuSVR(sklearn_NuSVR, BaseSVR):
 
     @wrap_output_data
     def predict(self, X):
-        """
-        Perform regression on samples in X.
-
-        For an one-class model, +1 (inlier) or -1 (outlier) is returned.
-
-        Parameters
-        ----------
-        X : {array-like, sparse matrix} of shape (n_samples, n_features)
-            For kernel="precomputed", the expected shape of X is
-            (n_samples_test, n_samples_train).
-
-        Returns
-        -------
-        y_pred : ndarray of shape (n_samples,)
-            The predicted values.
-        """
         if sklearn_check_version("1.0"):
             self._check_feature_names(X, reset=False)
         return dispatch(

@@ -161,3 +114,6 @@ class NuSVR(sklearn_NuSVR, BaseSVR):
 
     def _onedal_predict(self, X, queue=None):
         return self._onedal_estimator.predict(X, queue=queue)
+
+    fit.__doc__ = sklearn_NuSVR.fit.__doc__
+    predict.__doc__ = sklearn_NuSVR.predict.__doc__
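`@control_n_jobs(decorated_methods=["fit", "predict"])` marks which NuSVR methods participate in the package's n_jobs threading control, exercised by the new sklearnex/tests/test_n_jobs_support.py in the file list above. Assuming the decorator surfaces an `n_jobs` estimator parameter, as those tests suggest, usage would look roughly like this sketch:

# Hedged sketch: assumes control_n_jobs exposes an n_jobs parameter on the
# estimator, which then bounds threading inside the decorated fit/predict.
from sklearn.datasets import make_regression
from sklearnex.svm import NuSVR

X, y = make_regression(n_samples=200, n_features=5, random_state=0)
reg = NuSVR(kernel="rbf", n_jobs=2).fit(X, y)  # n_jobs assumed to be accepted here
pred = reg.predict(X)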
sklearnex/svm/svc.py CHANGED

@@ -17,10 +17,13 @@
 import numpy as np
 from scipy import sparse as sp
 from sklearn.exceptions import NotFittedError
+from sklearn.metrics import accuracy_score
 from sklearn.svm import SVC as sklearn_SVC
 from sklearn.utils.validation import _deprecate_positional_args
 
+from daal4py.sklearn._n_jobs_support import control_n_jobs
 from daal4py.sklearn._utils import sklearn_check_version
+from sklearnex.utils import get_namespace
 
 from .._device_offload import dispatch, wrap_output_data
 from .._utils import PatchingConditionsChain

@@ -32,6 +35,9 @@ if sklearn_check_version("1.0"):
 from onedal.svm import SVC as onedal_SVC
 
 
+@control_n_jobs(
+    decorated_methods=["fit", "predict", "_predict_proba", "decision_function", "score"]
+)
 class SVC(sklearn_SVC, BaseSVC):
     __doc__ = sklearn_SVC.__doc__
 

@@ -77,39 +83,6 @@ class SVC(sklearn_SVC, BaseSVC):
         )
 
     def fit(self, X, y, sample_weight=None):
-        """
-        Fit the SVM model according to the given training data.
-
-        Parameters
-        ----------
-        X : {array-like, sparse matrix} of shape (n_samples, n_features) \
-                or (n_samples, n_samples)
-            Training vectors, where `n_samples` is the number of samples
-            and `n_features` is the number of features.
-            For kernel="precomputed", the expected shape of X is
-            (n_samples, n_samples).
-
-        y : array-like of shape (n_samples,)
-            Target values (class labels in classification, real numbers in
-            regression).
-
-        sample_weight : array-like of shape (n_samples,), default=None
-            Per-sample weights. Rescale C per sample. Higher weights
-            force the classifier to put more emphasis on these points.
-
-        Returns
-        -------
-        self : object
-            Fitted estimator.
-
-        Notes
-        -----
-        If X and y are not C-ordered and contiguous arrays of np.float64 and
-        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
-
-        If X is a dense array, then the other methods will not support sparse
-        matrices as input.
-        """
         if sklearn_check_version("1.2"):
             self._validate_params()
         if sklearn_check_version("1.0"):

@@ -129,22 +102,6 @@ class SVC(sklearn_SVC, BaseSVC):
 
     @wrap_output_data
     def predict(self, X):
-        """
-        Perform regression on samples in X.
-
-        For an one-class model, +1 (inlier) or -1 (outlier) is returned.
-
-        Parameters
-        ----------
-        X : {array-like, sparse matrix} of shape (n_samples, n_features)
-            For kernel="precomputed", the expected shape of X is
-            (n_samples_test, n_samples_train).
-
-        Returns
-        -------
-        y_pred : ndarray of shape (n_samples,)
-            The predicted values.
-        """
         if sklearn_check_version("1.0"):
             self._check_feature_names(X, reset=False)
         return dispatch(

@@ -157,6 +114,22 @@ class SVC(sklearn_SVC, BaseSVC):
             X,
         )
 
+    @wrap_output_data
+    def score(self, X, y, sample_weight=None):
+        if sklearn_check_version("1.0"):
+            self._check_feature_names(X, reset=False)
+        return dispatch(
+            self,
+            "score",
+            {
+                "onedal": self.__class__._onedal_score,
+                "sklearn": sklearn_SVC.score,
+            },
+            X,
+            y,
+            sample_weight=sample_weight,
+        )
+
     if sklearn_check_version("1.0"):
 
         @available_if(sklearn_SVC._check_proba)

@@ -189,6 +162,38 @@ class SVC(sklearn_SVC, BaseSVC):
             """
             return self._predict_proba(X)
 
+        @available_if(sklearn_SVC._check_proba)
+        def predict_log_proba(self, X):
+            """Compute log probabilities of possible outcomes for samples in X.
+
+            The model need to have probability information computed at training
+            time: fit with attribute `probability` set to True.
+
+            Parameters
+            ----------
+            X : array-like of shape (n_samples, n_features) or \
+                    (n_samples_test, n_samples_train)
+                For kernel="precomputed", the expected shape of X is
+                (n_samples_test, n_samples_train).
+
+            Returns
+            -------
+            T : ndarray of shape (n_samples, n_classes)
+                Returns the log-probabilities of the sample for each class in
+                the model. The columns correspond to the classes in sorted
+                order, as they appear in the attribute :term:`classes_`.
+
+            Notes
+            -----
+            The probability model is created using cross validation, so
+            the results can be slightly different than those obtained by
+            predict. Also, it will produce meaningless results on very small
+            datasets.
+            """
+            xp, _ = get_namespace(X)
+
+            return xp.log(self.predict_proba(X))
+
     else:
 
         @property

@@ -196,6 +201,12 @@ class SVC(sklearn_SVC, BaseSVC):
             self._check_proba()
             return self._predict_proba
 
+        def _predict_log_proba(self, X):
+            xp, _ = get_namespace(X)
+            return xp.log(self.predict_proba(X))
+
+        predict_proba.__doc__ = sklearn_SVC.predict_proba.__doc__
+
     @wrap_output_data
     def _predict_proba(self, X):
         sklearn_pred_proba = (

@@ -228,6 +239,8 @@ class SVC(sklearn_SVC, BaseSVC):
             X,
         )
 
+    decision_function.__doc__ = sklearn_SVC.decision_function.__doc__
+
     def _onedal_gpu_supported(self, method_name, *data):
         class_name = self.__class__.__name__
         patching_status = PatchingConditionsChain(

@@ -235,7 +248,7 @@ class SVC(sklearn_SVC, BaseSVC):
         )
         if len(data) > 1:
             self._class_count = len(np.unique(data[1]))
-            self._is_sparse = sp.
+            self._is_sparse = sp.issparse(data[0])
         conditions = [
             (
                 self.kernel in ["linear", "rbf"],

@@ -249,7 +262,7 @@ class SVC(sklearn_SVC, BaseSVC):
         if method_name == "fit":
             patching_status.and_conditions(conditions)
             return patching_status
-        if method_name in ["predict", "predict_proba", "decision_function"]:
+        if method_name in ["predict", "predict_proba", "decision_function", "score"]:
             conditions.append(
                 (hasattr(self, "_onedal_estimator"), "oneDAL model was not trained")
             )

@@ -299,3 +312,13 @@ class SVC(sklearn_SVC, BaseSVC):
 
     def _onedal_decision_function(self, X, queue=None):
         return self._onedal_estimator.decision_function(X, queue=queue)
+
+    def _onedal_score(self, X, y, sample_weight=None, queue=None):
+        return accuracy_score(
+            y, self._onedal_predict(X, queue=queue), sample_weight=sample_weight
+        )
+
+    fit.__doc__ = sklearn_SVC.fit.__doc__
+    predict.__doc__ = sklearn_SVC.predict.__doc__
+    decision_function.__doc__ = sklearn_SVC.decision_function.__doc__
+    score.__doc__ = sklearn_SVC.score.__doc__
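`_onedal_score` mirrors scikit-learn's classifier scoring: the accelerated path predicts (optionally on a device queue) and then reuses `sklearn.metrics.accuracy_score`, so the reported metric matches computing it by hand. A small sketch of that equivalence with illustrative data:

# Equivalence check for the score path added above; data is illustrative.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score
from sklearnex.svm import SVC

X, y = make_classification(n_samples=300, random_state=0)
clf = SVC(kernel="rbf").fit(X, y)

assert np.isclose(clf.score(X, y), accuracy_score(y, clf.predict(X)))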
sklearnex/svm/svr.py CHANGED

@@ -17,6 +17,7 @@
 from sklearn.svm import SVR as sklearn_SVR
 from sklearn.utils.validation import _deprecate_positional_args
 
+from daal4py.sklearn._n_jobs_support import control_n_jobs
 from daal4py.sklearn._utils import sklearn_check_version
 from onedal.svm import SVR as onedal_SVR
 

@@ -24,6 +25,7 @@ from .._device_offload import dispatch, wrap_output_data
 from ._common import BaseSVR
 
 
+@control_n_jobs(decorated_methods=["fit", "predict"])
 class SVR(sklearn_SVR, BaseSVR):
     __doc__ = sklearn_SVR.__doc__
 

@@ -61,39 +63,6 @@ class SVR(sklearn_SVR, BaseSVR):
         )
 
     def fit(self, X, y, sample_weight=None):
-        """
-        Fit the SVM model according to the given training data.
-
-        Parameters
-        ----------
-        X : {array-like, sparse matrix} of shape (n_samples, n_features) \
-                or (n_samples, n_samples)
-            Training vectors, where `n_samples` is the number of samples
-            and `n_features` is the number of features.
-            For kernel="precomputed", the expected shape of X is
-            (n_samples, n_samples).
-
-        y : array-like of shape (n_samples,)
-            Target values (class labels in classification, real numbers in
-            regression).
-
-        sample_weight : array-like of shape (n_samples,), default=None
-            Per-sample weights. Rescale C per sample. Higher weights
-            force the classifier to put more emphasis on these points.
-
-        Returns
-        -------
-        self : object
-            Fitted estimator.
-
-        Notes
-        -----
-        If X and y are not C-ordered and contiguous arrays of np.float64 and
-        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
-
-        If X is a dense array, then the other methods will not support sparse
-        matrices as input.
-        """
         if sklearn_check_version("1.2"):
             self._validate_params()
         if sklearn_check_version("1.0"):

@@ -114,22 +83,6 @@ class SVR(sklearn_SVR, BaseSVR):
 
     @wrap_output_data
     def predict(self, X):
-        """
-        Perform regression on samples in X.
-
-        For an one-class model, +1 (inlier) or -1 (outlier) is returned.
-
-        Parameters
-        ----------
-        X : {array-like, sparse matrix} of shape (n_samples, n_features)
-            For kernel="precomputed", the expected shape of X is
-            (n_samples_test, n_samples_train).
-
-        Returns
-        -------
-        y_pred : ndarray of shape (n_samples,)
-            The predicted values.
-        """
         if sklearn_check_version("1.0"):
             self._check_feature_names(X, reset=False)
         return dispatch(

@@ -162,3 +115,6 @@ class SVR(sklearn_SVR, BaseSVR):
 
     def _onedal_predict(self, X, queue=None):
         return self._onedal_estimator.predict(X, queue=queue)
+
+    fit.__doc__ = sklearn_SVR.fit.__doc__
+    predict.__doc__ = sklearn_SVR.predict.__doc__
sklearnex/svm/tests/test_svm.py CHANGED