snowflake-ml-python 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- snowflake/cortex/_complete.py +1 -1
- snowflake/cortex/_extract_answer.py +1 -1
- snowflake/cortex/_sentiment.py +1 -1
- snowflake/cortex/_summarize.py +1 -1
- snowflake/cortex/_translate.py +1 -1
- snowflake/ml/_internal/env_utils.py +68 -6
- snowflake/ml/_internal/file_utils.py +34 -4
- snowflake/ml/_internal/telemetry.py +79 -91
- snowflake/ml/_internal/utils/identifier.py +78 -72
- snowflake/ml/_internal/utils/retryable_http.py +16 -4
- snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
- snowflake/ml/dataset/dataset.py +1 -1
- snowflake/ml/model/_api.py +21 -14
- snowflake/ml/model/_client/model/model_impl.py +176 -0
- snowflake/ml/model/_client/model/model_method_info.py +19 -0
- snowflake/ml/model/_client/model/model_version_impl.py +291 -0
- snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
- snowflake/ml/model/_client/ops/model_ops.py +308 -0
- snowflake/ml/model/_client/sql/model.py +75 -0
- snowflake/ml/model/_client/sql/model_version.py +213 -0
- snowflake/ml/model/_client/sql/stage.py +40 -0
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
- snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
- snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
- snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
- snowflake/ml/model/_model_composer/model_composer.py +31 -9
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
- snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
- snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
- snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
- snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
- snowflake/ml/model/model_signature.py +108 -53
- snowflake/ml/model/type_hints.py +1 -0
- snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
- snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
- snowflake/ml/modeling/_internal/model_specifications.py +146 -0
- snowflake/ml/modeling/_internal/model_trainer.py +13 -0
- snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
- snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
- snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
- snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +108 -135
- snowflake/ml/modeling/cluster/affinity_propagation.py +106 -135
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +106 -135
- snowflake/ml/modeling/cluster/birch.py +106 -135
- snowflake/ml/modeling/cluster/bisecting_k_means.py +106 -135
- snowflake/ml/modeling/cluster/dbscan.py +106 -135
- snowflake/ml/modeling/cluster/feature_agglomeration.py +106 -135
- snowflake/ml/modeling/cluster/k_means.py +105 -135
- snowflake/ml/modeling/cluster/mean_shift.py +106 -135
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +105 -135
- snowflake/ml/modeling/cluster/optics.py +106 -135
- snowflake/ml/modeling/cluster/spectral_biclustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_clustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_coclustering.py +106 -135
- snowflake/ml/modeling/compose/column_transformer.py +106 -135
- snowflake/ml/modeling/compose/transformed_target_regressor.py +108 -135
- snowflake/ml/modeling/covariance/elliptic_envelope.py +106 -135
- snowflake/ml/modeling/covariance/empirical_covariance.py +99 -128
- snowflake/ml/modeling/covariance/graphical_lasso.py +106 -135
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +106 -135
- snowflake/ml/modeling/covariance/ledoit_wolf.py +104 -133
- snowflake/ml/modeling/covariance/min_cov_det.py +106 -135
- snowflake/ml/modeling/covariance/oas.py +99 -128
- snowflake/ml/modeling/covariance/shrunk_covariance.py +103 -132
- snowflake/ml/modeling/decomposition/dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/factor_analysis.py +106 -135
- snowflake/ml/modeling/decomposition/fast_ica.py +106 -135
- snowflake/ml/modeling/decomposition/incremental_pca.py +106 -135
- snowflake/ml/modeling/decomposition/kernel_pca.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/pca.py +106 -135
- snowflake/ml/modeling/decomposition/sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/truncated_svd.py +106 -135
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/isolation_forest.py +106 -135
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/stacking_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/voting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/voting_regressor.py +108 -135
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +101 -128
- snowflake/ml/modeling/feature_selection/select_fdr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fpr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fwe.py +99 -126
- snowflake/ml/modeling/feature_selection/select_k_best.py +100 -127
- snowflake/ml/modeling/feature_selection/select_percentile.py +99 -126
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +106 -135
- snowflake/ml/modeling/feature_selection/variance_threshold.py +95 -124
- snowflake/ml/modeling/framework/base.py +83 -1
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +108 -135
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +108 -135
- snowflake/ml/modeling/impute/iterative_imputer.py +106 -135
- snowflake/ml/modeling/impute/knn_imputer.py +106 -135
- snowflake/ml/modeling/impute/missing_indicator.py +106 -135
- snowflake/ml/modeling/impute/simple_imputer.py +9 -1
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +96 -125
- snowflake/ml/modeling/kernel_approximation/nystroem.py +106 -135
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +106 -135
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +105 -134
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +103 -132
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +108 -135
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +90 -118
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +90 -118
- snowflake/ml/modeling/linear_model/ard_regression.py +108 -135
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/gamma_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/huber_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/lars.py +108 -135
- snowflake/ml/modeling/linear_model/lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +108 -135
- snowflake/ml/modeling/linear_model/linear_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +107 -135
- snowflake/ml/modeling/linear_model/perceptron.py +107 -135
- snowflake/ml/modeling/linear_model/poisson_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ransac_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ridge.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_cv.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +106 -135
- snowflake/ml/modeling/linear_model/sgd_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +108 -135
- snowflake/ml/modeling/manifold/isomap.py +106 -135
- snowflake/ml/modeling/manifold/mds.py +106 -135
- snowflake/ml/modeling/manifold/spectral_embedding.py +106 -135
- snowflake/ml/modeling/manifold/tsne.py +106 -135
- snowflake/ml/modeling/metrics/classification.py +196 -55
- snowflake/ml/modeling/metrics/correlation.py +4 -2
- snowflake/ml/modeling/metrics/covariance.py +7 -4
- snowflake/ml/modeling/metrics/ranking.py +32 -16
- snowflake/ml/modeling/metrics/regression.py +60 -32
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +106 -135
- snowflake/ml/modeling/mixture/gaussian_mixture.py +106 -135
- snowflake/ml/modeling/model_selection/grid_search_cv.py +91 -148
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +93 -154
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +105 -132
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +108 -135
- snowflake/ml/modeling/multiclass/output_code_classifier.py +108 -135
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/complement_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +98 -125
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +107 -134
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neighbors/kernel_density.py +106 -135
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +106 -135
- snowflake/ml/modeling/neighbors/nearest_centroid.py +108 -135
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +106 -135
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +106 -135
- snowflake/ml/modeling/neural_network/mlp_classifier.py +108 -135
- snowflake/ml/modeling/neural_network/mlp_regressor.py +108 -135
- snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
- snowflake/ml/modeling/preprocessing/binarizer.py +25 -8
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +9 -4
- snowflake/ml/modeling/preprocessing/label_encoder.py +31 -11
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +27 -9
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +42 -14
- snowflake/ml/modeling/preprocessing/normalizer.py +9 -4
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +26 -10
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +37 -13
- snowflake/ml/modeling/preprocessing/polynomial_features.py +106 -135
- snowflake/ml/modeling/preprocessing/robust_scaler.py +39 -13
- snowflake/ml/modeling/preprocessing/standard_scaler.py +36 -12
- snowflake/ml/modeling/semi_supervised/label_propagation.py +108 -135
- snowflake/ml/modeling/semi_supervised/label_spreading.py +108 -135
- snowflake/ml/modeling/svm/linear_svc.py +108 -135
- snowflake/ml/modeling/svm/linear_svr.py +108 -135
- snowflake/ml/modeling/svm/nu_svc.py +108 -135
- snowflake/ml/modeling/svm/nu_svr.py +108 -135
- snowflake/ml/modeling/svm/svc.py +108 -135
- snowflake/ml/modeling/svm/svr.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_regressor.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_regressor.py +108 -135
- snowflake/ml/modeling/xgboost/xgb_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgb_regressor.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +108 -136
- snowflake/ml/registry/model_registry.py +2 -0
- snowflake/ml/registry/registry.py +215 -0
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +34 -1
- snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
- snowflake_ml_python-1.1.0.dist-info/RECORD +0 -331
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
--- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py (1.1.0)
+++ snowflake/ml/modeling/kernel_approximation/rbf_sampler.py (1.1.2)
@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers

 from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.kernel_approximation".replace("sklearn.", "").split("_")])


-
 class RBFSampler(BaseTransformer):
     r"""Approximate a RBF kernel feature map using random Fourier features
     For more details on this class, see [sklearn.kernel_approximation.RBFSampler]
@@ -60,49 +61,63 @@ class RBFSampler(BaseTransformer):

     Parameters
     ----------
-    gamma: 'scale' or float, default=1.0
-        Parameter of RBF kernel: exp(-gamma * x^2).
-        If ``gamma='scale'`` is passed then it uses
-        1 / (n_features * X.var()) as value of gamma.
-
-    n_components: int, default=100
-        Number of Monte Carlo samples per original feature.
-        Equals the dimensionality of the computed feature space.
-
-    random_state: int, RandomState instance or None, default=None
-        Pseudo-random number generator to control the generation of the random
-        weights and random offset when fitting the training data.
-        Pass an int for reproducible output across multiple function calls.
-        See :term:`Glossary <random_state>`.

     input_cols: Optional[Union[str, List[str]]]
         A string or list of strings representing column names that contain features.
         If this parameter is not specified, all columns in the input DataFrame except
-        the columns specified by label_cols
-        considered input columns.
-
+        the columns specified by label_cols, sample_weight_col, and passthrough_cols
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
     label_cols: Optional[Union[str, List[str]]]
-
-
-        columns. If this parameter is not specified, then object is fitted without
-        labels (like a transformer).
-
+        This parameter is optional and will be ignored during fit. It is present here for API consistency by convention.
+
     output_cols: Optional[Union[str, List[str]]]
         A string or list of strings representing column names that will store the
         output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific
+        match the expected number of output columns from the specific predictor or
         transformer class used.
-        If this parameter
-
-
-        be set explicitly for transformers.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don’t specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        be set explicitly for transformers. Output columns can also be set after
+        initialization with the `set_output_cols` method.

     sample_weight_col: Optional[str]
         A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.
+
+    passthrough_cols: Optional[Union[str, List[str]]]
+        A string or a list of strings indicating column names to be excluded from any
+        operations (such as train, transform, or inference). These specified column(s)
+        will remain untouched throughout the process. This option is helpful in scenarios
+        requiring automatic input_cols inference, but need to avoid using specific
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.

     drop_input_cols: Optional[bool], default=False
         If set, the response of predict(), transform() methods will not contain input columns.
+
+    gamma: 'scale' or float, default=1.0
+        Parameter of RBF kernel: exp(-gamma * x^2).
+        If ``gamma='scale'`` is passed then it uses
+        1 / (n_features * X.var()) as value of gamma.
+
+    n_components: int, default=100
+        Number of Monte Carlo samples per original feature.
+        Equals the dimensionality of the computed feature space.
+
+    random_state: int, RandomState instance or None, default=None
+        Pseudo-random number generator to control the generation of the random
+        weights and random offset when fitting the training data.
+        Pass an int for reproducible output across multiple function calls.
+        See :term:`Glossary <random_state>`.
     """

     def __init__(  # type: ignore[no-untyped-def]
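The docstring changes above introduce a `passthrough_cols` option on this autogenerated estimator. As a rough illustration only (the Snowpark session, table name, and column names below are invented, and the import path is assumed from the file layout), it might be used like this:

```python
# Hedged usage sketch for passthrough_cols; the session, table, and column names are made up.
from snowflake.ml.modeling.kernel_approximation import RBFSampler  # path assumed from rbf_sampler.py


def transform_with_passthrough(session):
    df = session.table("FEATURES")  # hypothetical table with columns ID, F1, F2
    sampler = RBFSampler(
        gamma=1.0,
        n_components=100,
        input_cols=["F1", "F2"],                          # feature columns (made up)
        output_cols=[f"OUTPUT_{i}" for i in range(100)],  # one name per random Fourier feature
        passthrough_cols=["ID"],                          # excluded from fit/transform, kept in the output
    )
    sampler.fit(df)              # Snowpark DataFrame path shown in the fit() hunk below
    return sampler.transform(df) # result keeps ID alongside the OUTPUT_* columns
```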
@@ -114,6 +129,7 @@ class RBFSampler(BaseTransformer):
         input_cols: Optional[Union[str, Iterable[str]]] = None,
         output_cols: Optional[Union[str, Iterable[str]]] = None,
         label_cols: Optional[Union[str, Iterable[str]]] = None,
+        passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
         drop_input_cols: Optional[bool] = False,
         sample_weight_col: Optional[str] = None,
     ) -> None:
@@ -122,9 +138,10 @@ class RBFSampler(BaseTransformer):
         self.set_input_cols(input_cols)
         self.set_output_cols(output_cols)
         self.set_label_cols(label_cols)
+        self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])

         self._deps = list(deps)

@@ -135,13 +152,14 @@ class RBFSampler(BaseTransformer):
             args=init_args,
             klass=sklearn.kernel_approximation.RBFSampler
         )
-        self._sklearn_object = sklearn.kernel_approximation.RBFSampler(
+        self._sklearn_object: Any = sklearn.kernel_approximation.RBFSampler(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=RBFSampler.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=RBFSampler.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True

     def _get_rand_id(self) -> str:
         """
@@ -152,24 +170,6 @@ class RBFSampler(BaseTransformer):
         """
         return str(uuid4()).replace("-", "_").upper()

-    def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
-        """
-        Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
-
-        Args:
-            dataset: Input dataset.
-        """
-        if not self.input_cols:
-            cols = [
-                c for c in dataset.columns
-                if c not in self.get_label_cols() and c != self.sample_weight_col
-            ]
-            self.set_input_cols(input_cols=cols)
-
-        if not self.output_cols:
-            cols = [identifier.concat_names(ids=['OUTPUT_', c]) for c in self.label_cols]
-            self.set_output_cols(output_cols=cols)
-
     def set_input_cols(self, input_cols: Optional[Union[str, Iterable[str]]]) -> "RBFSampler":
         """
         Input columns setter.
@@ -215,54 +215,48 @@ class RBFSampler(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-
-
-
-
-
-        self.
-
-
-
-
-
-
-
-
-
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None  # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+            # If we are already in a stored procedure, no need to kick off another one.
+            if SNOWML_SPROC_ENV in os.environ:
+                statement_params = telemetry.get_function_usage_statement_params(
+                    project=_PROJECT,
+                    subproject=_SUBPROJECT,
+                    function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), RBFSampler.__class__.__name__),
+                    api_calls=[Session.call],
+                    custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+                )
+                pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+                pd_df.columns = dataset.columns
+                dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self

-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None  # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None  # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
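The reworked `fit()` above no longer calls a private `_fit_snowpark()`; it normalizes the dataset and hands everything to `ModelTrainerBuilder.build(...).train()`. The builder itself is not part of this excerpt, but given the new `pandas_trainer.py` and `snowpark_trainer.py` modules in the file list, a plausible, purely illustrative shape of that dispatch is sketched below (the trainer class and function names here are assumptions, not the package's code):

```python
# Illustrative sketch only: the trainer class and build function below are assumptions,
# not the contents of snowflake/ml/modeling/_internal/model_trainer_builder.py.
from dataclasses import dataclass
from typing import Any, List, Optional, Union

import pandas as pd
from snowflake.snowpark import DataFrame


@dataclass
class _LocalTrainerSketch:
    """Fits the wrapped sklearn estimator in-process on pandas data."""

    estimator: Any
    dataset: pd.DataFrame
    input_cols: List[str]
    label_cols: List[str]
    sample_weight_col: Optional[str]

    def train(self) -> Any:
        kwargs = {}
        if self.label_cols:
            kwargs["y"] = self.dataset[self.label_cols].squeeze()
        if self.sample_weight_col:
            kwargs["sample_weight"] = self.dataset[self.sample_weight_col].squeeze()
        # fit() returns the fitted estimator, matching `self._sklearn_object = model_trainer.train()`.
        return self.estimator.fit(self.dataset[self.input_cols], **kwargs)


def build_trainer_sketch(
    estimator: Any,
    dataset: Union[DataFrame, pd.DataFrame],
    input_cols: List[str],
    label_cols: List[str],
    sample_weight_col: Optional[str] = None,
) -> Any:
    # Local pandas data trains in-process; a Snowpark DataFrame would instead be
    # routed to a warehouse-side trainer (see snowpark_trainer.py in the file list).
    if isinstance(dataset, pd.DataFrame):
        return _LocalTrainerSketch(estimator, dataset, input_cols, label_cols, sample_weight_col)
    raise NotImplementedError("Snowpark-side training is beyond this sketch.")
```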
@@ -450,11 +444,6 @@ class RBFSampler(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.

@@ -506,11 +495,6 @@ class RBFSampler(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Apply the approximate feature map to X
         For more details on this function, see [sklearn.kernel_approximation.RBFSampler.transform]
@@ -569,7 +553,8 @@ class RBFSampler(BaseTransformer):
         if False:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels : npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError

@@ -605,6 +590,7 @@ class RBFSampler(BaseTransformer):
             output_cols = []

         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None  # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]

         return rv
@@ -615,11 +601,6 @@ class RBFSampler(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -660,11 +641,6 @@ class RBFSampler(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -701,16 +677,6 @@ class RBFSampler(BaseTransformer):
         return output_df

     @available_if(original_estimator_has_callable("decision_function"))  # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -809,11 +775,6 @@ class RBFSampler(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
@@ -873,18 +834,28 @@ class RBFSampler(BaseTransformer):
         # For classifier, the type of predict is the same as the type of label
         if self._sklearn_object._estimator_type == 'classifier':
             # label columns is the desired type for output
-            outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+            outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
             # rename the output columns
-            outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+            outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
+            self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                   ([] if self._drop_input_cols else inputs)
+                                                                   + outputs)
+        # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
+        # For outlier models, returns -1 for outliers and 1 for inliers.
+        # Clusterer returns int64 cluster labels.
+        elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
+            outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                    ([] if self._drop_input_cols else inputs)
                                                                    + outputs)
+
         # For regressor, the type of predict is float64
         elif self._sklearn_object._estimator_type == 'regressor':
             outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                    ([] if self._drop_input_cols else inputs)
                                                                    + outputs)
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"