scikit-learn-intelex 2025.1.0__py311-none-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of scikit-learn-intelex has been flagged as potentially problematic.
- daal4py/__init__.py +73 -0
- daal4py/__main__.py +58 -0
- daal4py/_daal4py.cpython-311-x86_64-linux-gnu.so +0 -0
- daal4py/doc/third-party-programs.txt +424 -0
- daal4py/mb/__init__.py +19 -0
- daal4py/mb/model_builders.py +377 -0
- daal4py/mpi_transceiver.cpython-311-x86_64-linux-gnu.so +0 -0
- daal4py/sklearn/__init__.py +40 -0
- daal4py/sklearn/_n_jobs_support.py +248 -0
- daal4py/sklearn/_utils.py +245 -0
- daal4py/sklearn/cluster/__init__.py +20 -0
- daal4py/sklearn/cluster/dbscan.py +165 -0
- daal4py/sklearn/cluster/k_means.py +597 -0
- daal4py/sklearn/cluster/tests/test_dbscan.py +109 -0
- daal4py/sklearn/decomposition/__init__.py +19 -0
- daal4py/sklearn/decomposition/_pca.py +524 -0
- daal4py/sklearn/ensemble/AdaBoostClassifier.py +196 -0
- daal4py/sklearn/ensemble/GBTDAAL.py +337 -0
- daal4py/sklearn/ensemble/__init__.py +27 -0
- daal4py/sklearn/ensemble/_forest.py +1397 -0
- daal4py/sklearn/ensemble/tests/test_decision_forest.py +206 -0
- daal4py/sklearn/linear_model/__init__.py +29 -0
- daal4py/sklearn/linear_model/_coordinate_descent.py +848 -0
- daal4py/sklearn/linear_model/_linear.py +272 -0
- daal4py/sklearn/linear_model/_ridge.py +325 -0
- daal4py/sklearn/linear_model/coordinate_descent.py +17 -0
- daal4py/sklearn/linear_model/linear.py +17 -0
- daal4py/sklearn/linear_model/logistic_loss.py +195 -0
- daal4py/sklearn/linear_model/logistic_path.py +1026 -0
- daal4py/sklearn/linear_model/ridge.py +17 -0
- daal4py/sklearn/linear_model/tests/test_linear.py +208 -0
- daal4py/sklearn/linear_model/tests/test_ridge.py +69 -0
- daal4py/sklearn/manifold/__init__.py +19 -0
- daal4py/sklearn/manifold/_t_sne.py +405 -0
- daal4py/sklearn/metrics/__init__.py +20 -0
- daal4py/sklearn/metrics/_pairwise.py +236 -0
- daal4py/sklearn/metrics/_ranking.py +210 -0
- daal4py/sklearn/model_selection/__init__.py +19 -0
- daal4py/sklearn/model_selection/_split.py +309 -0
- daal4py/sklearn/model_selection/tests/test_split.py +56 -0
- daal4py/sklearn/monkeypatch/__init__.py +0 -0
- daal4py/sklearn/monkeypatch/dispatcher.py +232 -0
- daal4py/sklearn/monkeypatch/tests/_models_info.py +161 -0
- daal4py/sklearn/monkeypatch/tests/test_monkeypatch.py +71 -0
- daal4py/sklearn/monkeypatch/tests/test_patching.py +90 -0
- daal4py/sklearn/monkeypatch/tests/utils/_launch_algorithms.py +117 -0
- daal4py/sklearn/neighbors/__init__.py +21 -0
- daal4py/sklearn/neighbors/_base.py +503 -0
- daal4py/sklearn/neighbors/_classification.py +139 -0
- daal4py/sklearn/neighbors/_regression.py +74 -0
- daal4py/sklearn/neighbors/_unsupervised.py +55 -0
- daal4py/sklearn/neighbors/tests/test_kneighbors.py +113 -0
- daal4py/sklearn/svm/__init__.py +19 -0
- daal4py/sklearn/svm/svm.py +734 -0
- daal4py/sklearn/utils/__init__.py +21 -0
- daal4py/sklearn/utils/base.py +75 -0
- daal4py/sklearn/utils/tests/test_utils.py +51 -0
- daal4py/sklearn/utils/validation.py +693 -0
- onedal/__init__.py +83 -0
- onedal/_config.py +54 -0
- onedal/_device_offload.py +222 -0
- onedal/_onedal_py_dpc.cpython-311-x86_64-linux-gnu.so +0 -0
- onedal/_onedal_py_host.cpython-311-x86_64-linux-gnu.so +0 -0
- onedal/_onedal_py_spmd_dpc.cpython-311-x86_64-linux-gnu.so +0 -0
- onedal/basic_statistics/__init__.py +20 -0
- onedal/basic_statistics/basic_statistics.py +107 -0
- onedal/basic_statistics/incremental_basic_statistics.py +160 -0
- onedal/basic_statistics/tests/test_basic_statistics.py +298 -0
- onedal/basic_statistics/tests/test_incremental_basic_statistics.py +196 -0
- onedal/cluster/__init__.py +27 -0
- onedal/cluster/dbscan.py +110 -0
- onedal/cluster/kmeans.py +564 -0
- onedal/cluster/kmeans_init.py +115 -0
- onedal/cluster/tests/test_dbscan.py +125 -0
- onedal/cluster/tests/test_kmeans.py +88 -0
- onedal/cluster/tests/test_kmeans_init.py +93 -0
- onedal/common/_base.py +38 -0
- onedal/common/_estimator_checks.py +47 -0
- onedal/common/_mixin.py +62 -0
- onedal/common/_policy.py +59 -0
- onedal/common/_spmd_policy.py +30 -0
- onedal/common/hyperparameters.py +125 -0
- onedal/common/tests/test_policy.py +76 -0
- onedal/covariance/__init__.py +20 -0
- onedal/covariance/covariance.py +125 -0
- onedal/covariance/incremental_covariance.py +146 -0
- onedal/covariance/tests/test_covariance.py +50 -0
- onedal/covariance/tests/test_incremental_covariance.py +122 -0
- onedal/datatypes/__init__.py +19 -0
- onedal/datatypes/_data_conversion.py +154 -0
- onedal/datatypes/tests/common.py +126 -0
- onedal/datatypes/tests/test_data.py +414 -0
- onedal/decomposition/__init__.py +20 -0
- onedal/decomposition/incremental_pca.py +204 -0
- onedal/decomposition/pca.py +186 -0
- onedal/decomposition/tests/test_incremental_pca.py +198 -0
- onedal/ensemble/__init__.py +29 -0
- onedal/ensemble/forest.py +727 -0
- onedal/ensemble/tests/test_random_forest.py +97 -0
- onedal/linear_model/__init__.py +27 -0
- onedal/linear_model/incremental_linear_model.py +258 -0
- onedal/linear_model/linear_model.py +329 -0
- onedal/linear_model/logistic_regression.py +249 -0
- onedal/linear_model/tests/test_incremental_linear_regression.py +168 -0
- onedal/linear_model/tests/test_incremental_ridge_regression.py +107 -0
- onedal/linear_model/tests/test_linear_regression.py +250 -0
- onedal/linear_model/tests/test_logistic_regression.py +95 -0
- onedal/linear_model/tests/test_ridge.py +95 -0
- onedal/neighbors/__init__.py +19 -0
- onedal/neighbors/neighbors.py +767 -0
- onedal/neighbors/tests/test_knn_classification.py +49 -0
- onedal/primitives/__init__.py +27 -0
- onedal/primitives/get_tree.py +25 -0
- onedal/primitives/kernel_functions.py +153 -0
- onedal/primitives/tests/test_kernel_functions.py +159 -0
- onedal/spmd/__init__.py +25 -0
- onedal/spmd/_base.py +30 -0
- onedal/spmd/basic_statistics/__init__.py +20 -0
- onedal/spmd/basic_statistics/basic_statistics.py +30 -0
- onedal/spmd/basic_statistics/incremental_basic_statistics.py +69 -0
- onedal/spmd/cluster/__init__.py +28 -0
- onedal/spmd/cluster/dbscan.py +23 -0
- onedal/spmd/cluster/kmeans.py +56 -0
- onedal/spmd/covariance/__init__.py +20 -0
- onedal/spmd/covariance/covariance.py +26 -0
- onedal/spmd/covariance/incremental_covariance.py +82 -0
- onedal/spmd/decomposition/__init__.py +20 -0
- onedal/spmd/decomposition/incremental_pca.py +117 -0
- onedal/spmd/decomposition/pca.py +26 -0
- onedal/spmd/ensemble/__init__.py +19 -0
- onedal/spmd/ensemble/forest.py +28 -0
- onedal/spmd/linear_model/__init__.py +21 -0
- onedal/spmd/linear_model/incremental_linear_model.py +97 -0
- onedal/spmd/linear_model/linear_model.py +30 -0
- onedal/spmd/linear_model/logistic_regression.py +38 -0
- onedal/spmd/neighbors/__init__.py +19 -0
- onedal/spmd/neighbors/neighbors.py +75 -0
- onedal/svm/__init__.py +19 -0
- onedal/svm/svm.py +556 -0
- onedal/svm/tests/test_csr_svm.py +351 -0
- onedal/svm/tests/test_nusvc.py +204 -0
- onedal/svm/tests/test_nusvr.py +210 -0
- onedal/svm/tests/test_svc.py +176 -0
- onedal/svm/tests/test_svr.py +243 -0
- onedal/tests/test_common.py +57 -0
- onedal/tests/utils/_dataframes_support.py +162 -0
- onedal/tests/utils/_device_selection.py +102 -0
- onedal/utils/__init__.py +49 -0
- onedal/utils/_array_api.py +81 -0
- onedal/utils/_dpep_helpers.py +56 -0
- onedal/utils/validation.py +440 -0
- scikit_learn_intelex-2025.1.0.dist-info/LICENSE.txt +202 -0
- scikit_learn_intelex-2025.1.0.dist-info/METADATA +231 -0
- scikit_learn_intelex-2025.1.0.dist-info/RECORD +280 -0
- scikit_learn_intelex-2025.1.0.dist-info/WHEEL +5 -0
- scikit_learn_intelex-2025.1.0.dist-info/top_level.txt +3 -0
- sklearnex/__init__.py +66 -0
- sklearnex/__main__.py +58 -0
- sklearnex/_config.py +116 -0
- sklearnex/_device_offload.py +126 -0
- sklearnex/_utils.py +132 -0
- sklearnex/basic_statistics/__init__.py +20 -0
- sklearnex/basic_statistics/basic_statistics.py +230 -0
- sklearnex/basic_statistics/incremental_basic_statistics.py +345 -0
- sklearnex/basic_statistics/tests/test_basic_statistics.py +270 -0
- sklearnex/basic_statistics/tests/test_incremental_basic_statistics.py +404 -0
- sklearnex/cluster/__init__.py +20 -0
- sklearnex/cluster/dbscan.py +197 -0
- sklearnex/cluster/k_means.py +395 -0
- sklearnex/cluster/tests/test_dbscan.py +38 -0
- sklearnex/cluster/tests/test_kmeans.py +159 -0
- sklearnex/conftest.py +82 -0
- sklearnex/covariance/__init__.py +19 -0
- sklearnex/covariance/incremental_covariance.py +398 -0
- sklearnex/covariance/tests/test_incremental_covariance.py +237 -0
- sklearnex/decomposition/__init__.py +19 -0
- sklearnex/decomposition/pca.py +425 -0
- sklearnex/decomposition/tests/test_pca.py +58 -0
- sklearnex/dispatcher.py +543 -0
- sklearnex/doc/third-party-programs.txt +424 -0
- sklearnex/ensemble/__init__.py +29 -0
- sklearnex/ensemble/_forest.py +2029 -0
- sklearnex/ensemble/tests/test_forest.py +135 -0
- sklearnex/glob/__main__.py +72 -0
- sklearnex/glob/dispatcher.py +101 -0
- sklearnex/linear_model/__init__.py +32 -0
- sklearnex/linear_model/coordinate_descent.py +30 -0
- sklearnex/linear_model/incremental_linear.py +482 -0
- sklearnex/linear_model/incremental_ridge.py +425 -0
- sklearnex/linear_model/linear.py +341 -0
- sklearnex/linear_model/logistic_regression.py +413 -0
- sklearnex/linear_model/ridge.py +24 -0
- sklearnex/linear_model/tests/test_incremental_linear.py +207 -0
- sklearnex/linear_model/tests/test_incremental_ridge.py +153 -0
- sklearnex/linear_model/tests/test_linear.py +167 -0
- sklearnex/linear_model/tests/test_logreg.py +134 -0
- sklearnex/manifold/__init__.py +19 -0
- sklearnex/manifold/t_sne.py +21 -0
- sklearnex/manifold/tests/test_tsne.py +26 -0
- sklearnex/metrics/__init__.py +23 -0
- sklearnex/metrics/pairwise.py +22 -0
- sklearnex/metrics/ranking.py +20 -0
- sklearnex/metrics/tests/test_metrics.py +39 -0
- sklearnex/model_selection/__init__.py +21 -0
- sklearnex/model_selection/split.py +22 -0
- sklearnex/model_selection/tests/test_model_selection.py +34 -0
- sklearnex/neighbors/__init__.py +27 -0
- sklearnex/neighbors/_lof.py +236 -0
- sklearnex/neighbors/common.py +310 -0
- sklearnex/neighbors/knn_classification.py +231 -0
- sklearnex/neighbors/knn_regression.py +207 -0
- sklearnex/neighbors/knn_unsupervised.py +178 -0
- sklearnex/neighbors/tests/test_neighbors.py +82 -0
- sklearnex/preview/__init__.py +17 -0
- sklearnex/preview/covariance/__init__.py +19 -0
- sklearnex/preview/covariance/covariance.py +138 -0
- sklearnex/preview/covariance/tests/test_covariance.py +66 -0
- sklearnex/preview/decomposition/__init__.py +19 -0
- sklearnex/preview/decomposition/incremental_pca.py +233 -0
- sklearnex/preview/decomposition/tests/test_incremental_pca.py +266 -0
- sklearnex/preview/linear_model/__init__.py +19 -0
- sklearnex/preview/linear_model/ridge.py +424 -0
- sklearnex/preview/linear_model/tests/test_ridge.py +102 -0
- sklearnex/spmd/__init__.py +25 -0
- sklearnex/spmd/basic_statistics/__init__.py +20 -0
- sklearnex/spmd/basic_statistics/basic_statistics.py +21 -0
- sklearnex/spmd/basic_statistics/incremental_basic_statistics.py +30 -0
- sklearnex/spmd/basic_statistics/tests/test_basic_statistics_spmd.py +107 -0
- sklearnex/spmd/basic_statistics/tests/test_incremental_basic_statistics_spmd.py +307 -0
- sklearnex/spmd/cluster/__init__.py +30 -0
- sklearnex/spmd/cluster/dbscan.py +50 -0
- sklearnex/spmd/cluster/kmeans.py +21 -0
- sklearnex/spmd/cluster/tests/test_dbscan_spmd.py +97 -0
- sklearnex/spmd/cluster/tests/test_kmeans_spmd.py +172 -0
- sklearnex/spmd/covariance/__init__.py +20 -0
- sklearnex/spmd/covariance/covariance.py +21 -0
- sklearnex/spmd/covariance/incremental_covariance.py +37 -0
- sklearnex/spmd/covariance/tests/test_covariance_spmd.py +107 -0
- sklearnex/spmd/covariance/tests/test_incremental_covariance_spmd.py +184 -0
- sklearnex/spmd/decomposition/__init__.py +20 -0
- sklearnex/spmd/decomposition/incremental_pca.py +30 -0
- sklearnex/spmd/decomposition/pca.py +21 -0
- sklearnex/spmd/decomposition/tests/test_incremental_pca_spmd.py +269 -0
- sklearnex/spmd/decomposition/tests/test_pca_spmd.py +128 -0
- sklearnex/spmd/ensemble/__init__.py +19 -0
- sklearnex/spmd/ensemble/forest.py +71 -0
- sklearnex/spmd/ensemble/tests/test_forest_spmd.py +265 -0
- sklearnex/spmd/linear_model/__init__.py +21 -0
- sklearnex/spmd/linear_model/incremental_linear_model.py +35 -0
- sklearnex/spmd/linear_model/linear_model.py +21 -0
- sklearnex/spmd/linear_model/logistic_regression.py +21 -0
- sklearnex/spmd/linear_model/tests/test_incremental_linear_spmd.py +329 -0
- sklearnex/spmd/linear_model/tests/test_linear_regression_spmd.py +145 -0
- sklearnex/spmd/linear_model/tests/test_logistic_regression_spmd.py +162 -0
- sklearnex/spmd/neighbors/__init__.py +19 -0
- sklearnex/spmd/neighbors/neighbors.py +25 -0
- sklearnex/spmd/neighbors/tests/test_neighbors_spmd.py +288 -0
- sklearnex/svm/__init__.py +29 -0
- sklearnex/svm/_common.py +339 -0
- sklearnex/svm/nusvc.py +371 -0
- sklearnex/svm/nusvr.py +170 -0
- sklearnex/svm/svc.py +399 -0
- sklearnex/svm/svr.py +167 -0
- sklearnex/svm/tests/test_svm.py +93 -0
- sklearnex/tests/test_common.py +390 -0
- sklearnex/tests/test_config.py +123 -0
- sklearnex/tests/test_memory_usage.py +379 -0
- sklearnex/tests/test_monkeypatch.py +276 -0
- sklearnex/tests/test_n_jobs_support.py +108 -0
- sklearnex/tests/test_parallel.py +48 -0
- sklearnex/tests/test_patching.py +385 -0
- sklearnex/tests/test_run_to_run_stability.py +321 -0
- sklearnex/tests/utils/__init__.py +44 -0
- sklearnex/tests/utils/base.py +371 -0
- sklearnex/tests/utils/spmd.py +198 -0
- sklearnex/utils/__init__.py +19 -0
- sklearnex/utils/_array_api.py +82 -0
- sklearnex/utils/parallel.py +59 -0
- sklearnex/utils/tests/test_finite.py +89 -0
- sklearnex/utils/validation.py +17 -0
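The wheel ships three top-level packages (daal4py, onedal, and sklearnex), matching the three entries in top_level.txt, with sklearnex/dispatcher.py and sklearnex/__main__.py providing the patching entry points. For orientation only, here is a minimal usage sketch based on the package's documented public patching API; it is not part of the diffs below:

import numpy as np

# Activate the accelerated implementations before importing estimators
# from scikit-learn; unpatch_sklearn() restores the stock versions.
from sklearnex import patch_sklearn, unpatch_sklearn

patch_sklearn()

from sklearn.cluster import DBSCAN  # now resolved to the patched estimator

X = np.random.default_rng(0).uniform(size=(100, 2))
labels = DBSCAN(eps=0.3, min_samples=5).fit_predict(X)

unpatch_sklearn()

The same patching can be applied without code changes by running a script as "python -m sklearnex my_script.py", which is what sklearnex/__main__.py in the listing handles.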
sklearnex/basic_statistics/incremental_basic_statistics.py (new file)
@@ -0,0 +1,345 @@
+# ==============================================================================
+# Copyright 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+import numpy as np
+from sklearn.base import BaseEstimator
+from sklearn.utils import check_array, gen_batches
+from sklearn.utils.validation import _check_sample_weight
+
+from daal4py.sklearn._n_jobs_support import control_n_jobs
+from daal4py.sklearn._utils import sklearn_check_version
+from onedal.basic_statistics import (
+    IncrementalBasicStatistics as onedal_IncrementalBasicStatistics,
+)
+
+from .._device_offload import dispatch
+from .._utils import IntelEstimator, PatchingConditionsChain
+
+if sklearn_check_version("1.2"):
+    from sklearn.utils._param_validation import Interval, StrOptions
+
+import numbers
+import warnings
+
+if sklearn_check_version("1.6"):
+    from sklearn.utils.validation import validate_data
+else:
+    validate_data = BaseEstimator._validate_data
+
+
+@control_n_jobs(decorated_methods=["partial_fit", "_onedal_finalize_fit"])
+class IncrementalBasicStatistics(IntelEstimator, BaseEstimator):
+    """
+    Calculates basic statistics on the given data, allows for computation when the data are split into
+    batches. The user can use ``partial_fit`` method to provide a single batch of data or use the ``fit`` method to provide
+    the entire dataset.
+
+    Parameters
+    ----------
+    result_options: string or list, default='all'
+        List of statistics to compute
+
+    batch_size : int, default=None
+        The number of samples to use for each batch. Only used when calling
+        ``fit``. If ``batch_size`` is ``None``, then ``batch_size``
+        is inferred from the data and set to ``5 * n_features``.
+
+    Attributes
+    ----------
+    min_ : ndarray of shape (n_features,)
+        Minimum of each feature over all samples.
+
+    max_ : ndarray of shape (n_features,)
+        Maximum of each feature over all samples.
+
+    sum_ : ndarray of shape (n_features,)
+        Sum of each feature over all samples.
+
+    mean_ : ndarray of shape (n_features,)
+        Mean of each feature over all samples.
+
+    variance_ : ndarray of shape (n_features,)
+        Variance of each feature over all samples.
+
+    variation_ : ndarray of shape (n_features,)
+        Variation of each feature over all samples.
+
+    sum_squares_ : ndarray of shape (n_features,)
+        Sum of squares for each feature over all samples.
+
+    standard_deviation_ : ndarray of shape (n_features,)
+        Standard deviation of each feature over all samples.
+
+    sum_squares_centered_ : ndarray of shape (n_features,)
+        Centered sum of squares for each feature over all samples.
+
+    second_order_raw_moment_ : ndarray of shape (n_features,)
+        Second order moment of each feature over all samples.
+
+    n_samples_seen_ : int
+        The number of samples processed by the estimator. Will be reset on
+        new calls to ``fit``, but increments across ``partial_fit`` calls.
+
+    batch_size_ : int
+        Inferred batch size from ``batch_size``.
+
+    n_features_in_ : int
+        Number of features seen during ``fit`` or ``partial_fit``.
+
+    Note
+    ----
+    Attribute exists only if corresponding result option has been provided.
+
+    Note
+    ----
+    Names of attributes without the trailing underscore are
+    supported currently but deprecated in 2025.1 and will be removed in 2026.0
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from sklearnex.basic_statistics import IncrementalBasicStatistics
+    >>> incbs = IncrementalBasicStatistics(batch_size=1)
+    >>> X = np.array([[1, 2], [3, 4]])
+    >>> incbs.partial_fit(X[:1])
+    >>> incbs.partial_fit(X[1:])
+    >>> incbs.sum_
+    np.array([4., 6.])
+    >>> incbs.min_
+    np.array([1., 2.])
+    >>> incbs.fit(X)
+    >>> incbs.sum_
+    np.array([4., 6.])
+    >>> incbs.max_
+    np.array([3., 4.])
+    """
+
+    _onedal_incremental_basic_statistics = staticmethod(onedal_IncrementalBasicStatistics)
+
+    if sklearn_check_version("1.2"):
+        _parameter_constraints: dict = {
+            "result_options": [
+                StrOptions(
+                    {
+                        "all",
+                        "min",
+                        "max",
+                        "sum",
+                        "mean",
+                        "variance",
+                        "variation",
+                        "sum_squares",
+                        "standard_deviation",
+                        "sum_squares_centered",
+                        "second_order_raw_moment",
+                    }
+                ),
+                list,
+            ],
+            "batch_size": [Interval(numbers.Integral, 1, None, closed="left"), None],
+        }
+
+    def __init__(self, result_options="all", batch_size=None):
+        if result_options == "all":
+            self.result_options = (
+                self._onedal_incremental_basic_statistics.get_all_result_options()
+            )
+        else:
+            self.result_options = result_options
+        self._need_to_finalize = False
+        self.batch_size = batch_size
+
+    def _onedal_supported(self, method_name, *data):
+        patching_status = PatchingConditionsChain(
+            f"sklearn.basic_statistics.{self.__class__.__name__}.{method_name}"
+        )
+        return patching_status
+
+    _onedal_cpu_supported = _onedal_supported
+    _onedal_gpu_supported = _onedal_supported
+
+    def _get_onedal_result_options(self, options):
+        if isinstance(options, list):
+            onedal_options = "|".join(self.result_options)
+        else:
+            onedal_options = options
+        assert isinstance(onedal_options, str)
+        return options
+
+    def _onedal_finalize_fit(self, queue=None):
+        assert hasattr(self, "_onedal_estimator")
+        self._onedal_estimator.finalize_fit(queue=queue)
+        self._need_to_finalize = False
+
+    def _onedal_partial_fit(self, X, sample_weight=None, queue=None, check_input=True):
+        first_pass = not hasattr(self, "n_samples_seen_") or self.n_samples_seen_ == 0
+
+        if check_input:
+            if sklearn_check_version("1.0"):
+                X = validate_data(
+                    self,
+                    X,
+                    dtype=[np.float64, np.float32],
+                    reset=first_pass,
+                )
+            else:
+                X = check_array(
+                    X,
+                    dtype=[np.float64, np.float32],
+                )
+
+        if sample_weight is not None:
+            sample_weight = _check_sample_weight(sample_weight, X)
+
+        if first_pass:
+            self.n_samples_seen_ = X.shape[0]
+            self.n_features_in_ = X.shape[1]
+        else:
+            self.n_samples_seen_ += X.shape[0]
+
+        onedal_params = {
+            "result_options": self._get_onedal_result_options(self.result_options)
+        }
+        if not hasattr(self, "_onedal_estimator"):
+            self._onedal_estimator = self._onedal_incremental_basic_statistics(
+                **onedal_params
+            )
+        self._onedal_estimator.partial_fit(X, weights=sample_weight, queue=queue)
+        self._need_to_finalize = True
+
+    def _onedal_fit(self, X, sample_weight=None, queue=None):
+        if sklearn_check_version("1.2"):
+            self._validate_params()
+
+        if sklearn_check_version("1.0"):
+            X = validate_data(self, X, dtype=[np.float64, np.float32])
+        else:
+            X = check_array(X, dtype=[np.float64, np.float32])
+
+        if sample_weight is not None:
+            sample_weight = _check_sample_weight(sample_weight, X)
+
+        n_samples, n_features = X.shape
+        if self.batch_size is None:
+            self.batch_size_ = 5 * n_features
+        else:
+            self.batch_size_ = self.batch_size
+
+        self.n_samples_seen_ = 0
+        if hasattr(self, "_onedal_estimator"):
+            self._onedal_estimator._reset()
+
+        for batch in gen_batches(X.shape[0], self.batch_size_):
+            X_batch = X[batch]
+            weights_batch = sample_weight[batch] if sample_weight is not None else None
+            self._onedal_partial_fit(
+                X_batch, weights_batch, queue=queue, check_input=False
+            )
+
+        self.n_features_in_ = X.shape[1]
+
+        self._onedal_finalize_fit(queue=queue)
+
+        return self
+
+    def __getattr__(self, attr):
+        result_options = self.__dict__["result_options"]
+        sattr = attr.removesuffix("_")
+        is_statistic_attr = (
+            isinstance(result_options, str) and (sattr == result_options)
+        ) or (isinstance(result_options, list) and (sattr in result_options))
+        if is_statistic_attr:
+            if self._need_to_finalize:
+                self._onedal_finalize_fit()
+            if sattr == attr:
+                warnings.warn(
+                    "Result attributes without a trailing underscore were deprecated in version 2025.1 and will be removed in 2026.0"
+                )
+            return getattr(self._onedal_estimator, sattr)
+        if attr in self.__dict__:
+            return self.__dict__[attr]
+
+        raise AttributeError(
+            f"'{self.__class__.__name__}' object has no attribute '{attr}'"
+        )
+
+    def partial_fit(self, X, sample_weight=None, check_input=True):
+        """Incremental fit with X. All of X is processed as a single batch.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            Data for compute, where ``n_samples`` is the number of samples and
+            ``n_features`` is the number of features.
+
+        y : Ignored
+            Not used, present for API consistency by convention.
+
+        sample_weight : array-like of shape (n_samples,), default=None
+            Weights for compute weighted statistics, where ``n_samples`` is the number of samples.
+
+        check_input : bool, default=True
+            Run ``check_array`` on X.
+
+        Returns
+        -------
+        self : object
+            Returns the instance itself.
+        """
+        dispatch(
+            self,
+            "partial_fit",
+            {
+                "onedal": self.__class__._onedal_partial_fit,
+                "sklearn": None,
+            },
+            X,
+            sample_weight,
+            check_input=check_input,
+        )
+        return self
+
+    def fit(self, X, y=None, sample_weight=None):
+        """Calculate statistics of X using minibatches of size batch_size.
+
+        Parameters
+        ----------
+        X : array-like of shape (n_samples, n_features)
+            Data for compute, where ``n_samples`` is the number of samples and
+            ``n_features`` is the number of features.
+
+        y : Ignored
+            Not used, present for API consistency by convention.
+
+        sample_weight : array-like of shape (n_samples,), default=None
+            Weights for compute weighted statistics, where ``n_samples`` is the number of samples.
+
+        Returns
+        -------
+        self : object
+            Returns the instance itself.
+        """
+        dispatch(
+            self,
+            "fit",
+            {
+                "onedal": self.__class__._onedal_fit,
+                "sklearn": None,
+            },
+            X,
+            sample_weight,
+        )
+        return self
sklearnex/basic_statistics/tests/test_basic_statistics.py (new file)
@@ -0,0 +1,270 @@
+# ==============================================================================
+# Copyright 2023 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+
+from daal4py.sklearn._utils import daal_check_version
+from onedal.basic_statistics.tests.test_basic_statistics import (
+    expected_max,
+    expected_mean,
+    expected_sum,
+    options_and_tests,
+)
+from onedal.tests.utils._dataframes_support import (
+    _convert_to_dataframe,
+    get_dataframes_and_queues,
+)
+from sklearnex.basic_statistics import BasicStatistics
+
+
+@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+def test_sklearnex_import_basic_statistics(dataframe, queue):
+    X = np.array([[0, 0], [1, 1]])
+    X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+
+    weights = np.array([1, 0.5])
+    weights_df = _convert_to_dataframe(weights, sycl_queue=queue, target_df=dataframe)
+
+    result = BasicStatistics().fit(X_df)
+
+    expected_mean = np.array([0.5, 0.5])
+    expected_min = np.array([0, 0])
+    expected_max = np.array([1, 1])
+
+    assert_allclose(expected_mean, result.mean)
+    assert_allclose(expected_max, result.max)
+    assert_allclose(expected_min, result.min)
+
+    result = BasicStatistics().fit(X_df, sample_weight=weights_df)
+
+    expected_weighted_mean = np.array([0.25, 0.25])
+    expected_weighted_min = np.array([0, 0])
+    expected_weighted_max = np.array([0.5, 0.5])
+
+    assert_allclose(expected_weighted_mean, result.mean)
+    assert_allclose(expected_weighted_min, result.min)
+    assert_allclose(expected_weighted_max, result.max)
+
+
+@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+@pytest.mark.parametrize("weighted", [True, False])
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_multiple_options_on_gold_data(dataframe, queue, weighted, dtype):
+    X = np.array([[0, 0], [1, 1]])
+    X = X.astype(dtype=dtype)
+    X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+    if weighted:
+        weights = np.array([1, 0.5])
+        weights = weights.astype(dtype=dtype)
+        weights_df = _convert_to_dataframe(weights, sycl_queue=queue, target_df=dataframe)
+    basicstat = BasicStatistics()
+
+    if weighted:
+        result = basicstat.fit(X_df, sample_weight=weights_df)
+    else:
+        result = basicstat.fit(X_df)
+
+    if weighted:
+        expected_weighted_mean = np.array([0.25, 0.25])
+        expected_weighted_min = np.array([0, 0])
+        expected_weighted_max = np.array([0.5, 0.5])
+        assert_allclose(expected_weighted_mean, result.mean)
+        assert_allclose(expected_weighted_max, result.max)
+        assert_allclose(expected_weighted_min, result.min)
+    else:
+        expected_mean = np.array([0.5, 0.5])
+        expected_min = np.array([0, 0])
+        expected_max = np.array([1, 1])
+        assert_allclose(expected_mean, result.mean)
+        assert_allclose(expected_max, result.max)
+        assert_allclose(expected_min, result.min)
+
+
+@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+@pytest.mark.parametrize("option", options_and_tests)
+@pytest.mark.parametrize("row_count", [100, 1000])
+@pytest.mark.parametrize("column_count", [10, 100])
+@pytest.mark.parametrize("weighted", [True, False])
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_single_option_on_random_data(
+    dataframe, queue, option, row_count, column_count, weighted, dtype
+):
+    result_option, function, tols = option
+    fp32tol, fp64tol = tols
+    seed = 77
+    gen = np.random.default_rng(seed)
+    X = gen.uniform(low=-0.3, high=+0.7, size=(row_count, column_count))
+    X = X.astype(dtype=dtype)
+    X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+    if weighted:
+        weights = gen.uniform(low=-0.5, high=1.0, size=row_count)
+        weights = weights.astype(dtype=dtype)
+        weights_df = _convert_to_dataframe(weights, sycl_queue=queue, target_df=dataframe)
+    basicstat = BasicStatistics(result_options=result_option)
+
+    if weighted:
+        result = basicstat.fit(X_df, sample_weight=weights_df)
+    else:
+        result = basicstat.fit(X_df)
+
+    res = getattr(result, result_option)
+    if weighted:
+        weighted_data = np.diag(weights) @ X
+        gtr = function(weighted_data)
+    else:
+        gtr = function(X)
+
+    tol = fp32tol if res.dtype == np.float32 else fp64tol
+    assert_allclose(gtr, res, atol=tol)
+
+
+@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+@pytest.mark.parametrize("row_count", [100, 1000])
+@pytest.mark.parametrize("column_count", [10, 100])
+@pytest.mark.parametrize("weighted", [True, False])
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_multiple_options_on_random_data(
+    dataframe, queue, row_count, column_count, weighted, dtype
+):
+    seed = 77
+    gen = np.random.default_rng(seed)
+    X = gen.uniform(low=-0.3, high=+0.7, size=(row_count, column_count))
+    X = X.astype(dtype=dtype)
+    X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+    if weighted:
+        weights = gen.uniform(low=-0.5, high=1.0, size=row_count)
+        weights = weights.astype(dtype=dtype)
+        weights_df = _convert_to_dataframe(weights, sycl_queue=queue, target_df=dataframe)
+    basicstat = BasicStatistics(result_options=["mean", "max", "sum"])
+
+    if weighted:
+        result = basicstat.fit(X_df, sample_weight=weights_df)
+    else:
+        result = basicstat.fit(X_df)
+
+    res_mean, res_max, res_sum = result.mean, result.max, result.sum
+    if weighted:
+        weighted_data = np.diag(weights) @ X
+        gtr_mean, gtr_max, gtr_sum = (
+            expected_mean(weighted_data),
+            expected_max(weighted_data),
+            expected_sum(weighted_data),
+        )
+    else:
+        gtr_mean, gtr_max, gtr_sum = (
+            expected_mean(X),
+            expected_max(X),
+            expected_sum(X),
+        )
+
+    tol = 5e-4 if res_mean.dtype == np.float32 else 1e-7
+    assert_allclose(gtr_mean, res_mean, atol=tol)
+    assert_allclose(gtr_max, res_max, atol=tol)
+    assert_allclose(gtr_sum, res_sum, atol=tol)
+
+
+@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+@pytest.mark.parametrize("row_count", [100, 1000])
+@pytest.mark.parametrize("column_count", [10, 100])
+@pytest.mark.parametrize("weighted", [True, False])
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_all_option_on_random_data(
+    dataframe, queue, row_count, column_count, weighted, dtype
+):
+    seed = 77
+    gen = np.random.default_rng(seed)
+    X = gen.uniform(low=-0.3, high=+0.7, size=(row_count, column_count))
+    X = X.astype(dtype=dtype)
+    X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+    if weighted:
+        weights = gen.uniform(low=-0.5, high=+1.0, size=row_count)
+        weights = weights.astype(dtype=dtype)
+        weights_df = _convert_to_dataframe(weights, sycl_queue=queue, target_df=dataframe)
+    basicstat = BasicStatistics(result_options="all")
+
+    if weighted:
+        result = basicstat.fit(X_df, sample_weight=weights_df)
+    else:
+        result = basicstat.fit(X_df)
+
+    if weighted:
+        weighted_data = np.diag(weights) @ X
+
+    for option in options_and_tests:
+        result_option, function, tols = option
+        fp32tol, fp64tol = tols
+        res = getattr(result, result_option)
+        if weighted:
+            gtr = function(weighted_data)
+        else:
+            gtr = function(X)
+        tol = fp32tol if res.dtype == np.float32 else fp64tol
+        assert_allclose(gtr, res, atol=tol)
+
+
+@pytest.mark.parametrize("dataframe,queue", get_dataframes_and_queues())
+@pytest.mark.parametrize("option", options_and_tests)
+@pytest.mark.parametrize("data_size", [100, 1000])
+@pytest.mark.parametrize("weighted", [True, False])
+@pytest.mark.parametrize("dtype", [np.float32, np.float64])
+def test_1d_input_on_random_data(dataframe, queue, option, data_size, weighted, dtype):
+    result_option, function, tols = option
+    fp32tol, fp64tol = tols
+    seed = 77
+    gen = np.random.default_rng(seed)
+    X = gen.uniform(low=-0.3, high=+0.7, size=data_size)
+    X = X.astype(dtype=dtype)
+    X_df = _convert_to_dataframe(X, sycl_queue=queue, target_df=dataframe)
+    if weighted:
+        weights = gen.uniform(low=-0.5, high=1.0, size=data_size)
+        weights = weights.astype(dtype=dtype)
+        weights_df = _convert_to_dataframe(weights, sycl_queue=queue, target_df=dataframe)
+    basicstat = BasicStatistics(result_options=result_option)
+
+    if weighted:
+        result = basicstat.fit(X_df, sample_weight=weights_df)
+    else:
+        result = basicstat.fit(X_df)
+
+    res = getattr(result, result_option)
+    if weighted:
+        weighted_data = weights * X
+        gtr = function(weighted_data)
+    else:
+        gtr = function(X)
+
+    tol = fp32tol if res.dtype == np.float32 else fp64tol
+    assert_allclose(gtr, res, atol=tol)
+
+
+def test_warning():
+    basicstat = BasicStatistics("all")
+    data = np.array([0, 1])
+
+    basicstat.fit(data)
+    for i in basicstat._onedal_estimator.get_all_result_options():
+        with pytest.warns(
+            UserWarning,
+            match="Result attributes without a trailing underscore were deprecated in version 2025.1 and will be removed in 2026.0",
+        ) as warn_record:
+            getattr(basicstat, i)
+
+        if daal_check_version((2026, "P", 0)):
+            assert len(warn_record) == 0, i
+        else:
+            assert len(warn_record) == 1, i