snowflake-ml-python 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- snowflake/cortex/_complete.py +1 -1
- snowflake/cortex/_extract_answer.py +1 -1
- snowflake/cortex/_sentiment.py +1 -1
- snowflake/cortex/_summarize.py +1 -1
- snowflake/cortex/_translate.py +1 -1
- snowflake/ml/_internal/env_utils.py +68 -6
- snowflake/ml/_internal/file_utils.py +34 -4
- snowflake/ml/_internal/telemetry.py +79 -91
- snowflake/ml/_internal/utils/identifier.py +78 -72
- snowflake/ml/_internal/utils/retryable_http.py +16 -4
- snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
- snowflake/ml/dataset/dataset.py +1 -1
- snowflake/ml/model/_api.py +21 -14
- snowflake/ml/model/_client/model/model_impl.py +176 -0
- snowflake/ml/model/_client/model/model_method_info.py +19 -0
- snowflake/ml/model/_client/model/model_version_impl.py +291 -0
- snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
- snowflake/ml/model/_client/ops/model_ops.py +308 -0
- snowflake/ml/model/_client/sql/model.py +75 -0
- snowflake/ml/model/_client/sql/model_version.py +213 -0
- snowflake/ml/model/_client/sql/stage.py +40 -0
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
- snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
- snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
- snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
- snowflake/ml/model/_model_composer/model_composer.py +31 -9
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
- snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
- snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
- snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
- snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
- snowflake/ml/model/model_signature.py +108 -53
- snowflake/ml/model/type_hints.py +1 -0
- snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
- snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
- snowflake/ml/modeling/_internal/model_specifications.py +146 -0
- snowflake/ml/modeling/_internal/model_trainer.py +13 -0
- snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
- snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
- snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
- snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +108 -135
- snowflake/ml/modeling/cluster/affinity_propagation.py +106 -135
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +106 -135
- snowflake/ml/modeling/cluster/birch.py +106 -135
- snowflake/ml/modeling/cluster/bisecting_k_means.py +106 -135
- snowflake/ml/modeling/cluster/dbscan.py +106 -135
- snowflake/ml/modeling/cluster/feature_agglomeration.py +106 -135
- snowflake/ml/modeling/cluster/k_means.py +105 -135
- snowflake/ml/modeling/cluster/mean_shift.py +106 -135
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +105 -135
- snowflake/ml/modeling/cluster/optics.py +106 -135
- snowflake/ml/modeling/cluster/spectral_biclustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_clustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_coclustering.py +106 -135
- snowflake/ml/modeling/compose/column_transformer.py +106 -135
- snowflake/ml/modeling/compose/transformed_target_regressor.py +108 -135
- snowflake/ml/modeling/covariance/elliptic_envelope.py +106 -135
- snowflake/ml/modeling/covariance/empirical_covariance.py +99 -128
- snowflake/ml/modeling/covariance/graphical_lasso.py +106 -135
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +106 -135
- snowflake/ml/modeling/covariance/ledoit_wolf.py +104 -133
- snowflake/ml/modeling/covariance/min_cov_det.py +106 -135
- snowflake/ml/modeling/covariance/oas.py +99 -128
- snowflake/ml/modeling/covariance/shrunk_covariance.py +103 -132
- snowflake/ml/modeling/decomposition/dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/factor_analysis.py +106 -135
- snowflake/ml/modeling/decomposition/fast_ica.py +106 -135
- snowflake/ml/modeling/decomposition/incremental_pca.py +106 -135
- snowflake/ml/modeling/decomposition/kernel_pca.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/pca.py +106 -135
- snowflake/ml/modeling/decomposition/sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/truncated_svd.py +106 -135
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/isolation_forest.py +106 -135
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/stacking_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/voting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/voting_regressor.py +108 -135
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +101 -128
- snowflake/ml/modeling/feature_selection/select_fdr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fpr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fwe.py +99 -126
- snowflake/ml/modeling/feature_selection/select_k_best.py +100 -127
- snowflake/ml/modeling/feature_selection/select_percentile.py +99 -126
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +106 -135
- snowflake/ml/modeling/feature_selection/variance_threshold.py +95 -124
- snowflake/ml/modeling/framework/base.py +83 -1
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +108 -135
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +108 -135
- snowflake/ml/modeling/impute/iterative_imputer.py +106 -135
- snowflake/ml/modeling/impute/knn_imputer.py +106 -135
- snowflake/ml/modeling/impute/missing_indicator.py +106 -135
- snowflake/ml/modeling/impute/simple_imputer.py +9 -1
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +96 -125
- snowflake/ml/modeling/kernel_approximation/nystroem.py +106 -135
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +106 -135
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +105 -134
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +103 -132
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +108 -135
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +90 -118
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +90 -118
- snowflake/ml/modeling/linear_model/ard_regression.py +108 -135
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/gamma_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/huber_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/lars.py +108 -135
- snowflake/ml/modeling/linear_model/lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +108 -135
- snowflake/ml/modeling/linear_model/linear_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +107 -135
- snowflake/ml/modeling/linear_model/perceptron.py +107 -135
- snowflake/ml/modeling/linear_model/poisson_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ransac_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ridge.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_cv.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +106 -135
- snowflake/ml/modeling/linear_model/sgd_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +108 -135
- snowflake/ml/modeling/manifold/isomap.py +106 -135
- snowflake/ml/modeling/manifold/mds.py +106 -135
- snowflake/ml/modeling/manifold/spectral_embedding.py +106 -135
- snowflake/ml/modeling/manifold/tsne.py +106 -135
- snowflake/ml/modeling/metrics/classification.py +196 -55
- snowflake/ml/modeling/metrics/correlation.py +4 -2
- snowflake/ml/modeling/metrics/covariance.py +7 -4
- snowflake/ml/modeling/metrics/ranking.py +32 -16
- snowflake/ml/modeling/metrics/regression.py +60 -32
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +106 -135
- snowflake/ml/modeling/mixture/gaussian_mixture.py +106 -135
- snowflake/ml/modeling/model_selection/grid_search_cv.py +91 -148
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +93 -154
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +105 -132
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +108 -135
- snowflake/ml/modeling/multiclass/output_code_classifier.py +108 -135
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/complement_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +98 -125
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +107 -134
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neighbors/kernel_density.py +106 -135
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +106 -135
- snowflake/ml/modeling/neighbors/nearest_centroid.py +108 -135
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +106 -135
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +106 -135
- snowflake/ml/modeling/neural_network/mlp_classifier.py +108 -135
- snowflake/ml/modeling/neural_network/mlp_regressor.py +108 -135
- snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
- snowflake/ml/modeling/preprocessing/binarizer.py +25 -8
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +9 -4
- snowflake/ml/modeling/preprocessing/label_encoder.py +31 -11
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +27 -9
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +42 -14
- snowflake/ml/modeling/preprocessing/normalizer.py +9 -4
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +26 -10
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +37 -13
- snowflake/ml/modeling/preprocessing/polynomial_features.py +106 -135
- snowflake/ml/modeling/preprocessing/robust_scaler.py +39 -13
- snowflake/ml/modeling/preprocessing/standard_scaler.py +36 -12
- snowflake/ml/modeling/semi_supervised/label_propagation.py +108 -135
- snowflake/ml/modeling/semi_supervised/label_spreading.py +108 -135
- snowflake/ml/modeling/svm/linear_svc.py +108 -135
- snowflake/ml/modeling/svm/linear_svr.py +108 -135
- snowflake/ml/modeling/svm/nu_svc.py +108 -135
- snowflake/ml/modeling/svm/nu_svr.py +108 -135
- snowflake/ml/modeling/svm/svc.py +108 -135
- snowflake/ml/modeling/svm/svr.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_regressor.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_regressor.py +108 -135
- snowflake/ml/modeling/xgboost/xgb_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgb_regressor.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +108 -136
- snowflake/ml/registry/model_registry.py +2 -0
- snowflake/ml/registry/registry.py +215 -0
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +34 -1
- snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
- snowflake_ml_python-1.1.0.dist-info/RECORD +0 -331
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
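Most of the new `snowflake/ml/model/_client/*` and `snowflake/ml/registry/registry.py` files above make up a new model-registry client. A hedged usage sketch based on the entry points those files suggest (`Registry`, `log_model`, `ModelVersion.run`); the exact signatures should be checked against the snowflake-ml-python 1.1.2 documentation:

```python
# Hedged sketch of the new registry client; entry-point names are inferred from the
# added modules (registry.py, model_impl.py, model_version_impl.py), not verified here.
import pandas as pd
from sklearn.linear_model import LinearRegression

from snowflake.ml.registry import Registry
from snowflake.snowpark import Session

connection_parameters = {"account": "...", "user": "...", "password": "..."}  # placeholder credentials
session = Session.builder.configs(connection_parameters).create()

train_df = pd.DataFrame({"X1": [1.0, 2.0, 3.0], "Y": [2.0, 4.0, 6.0]})
model = LinearRegression().fit(train_df[["X1"]], train_df["Y"])

reg = Registry(session=session)
# Depending on the model type, an explicit signature or a sample input is typically needed.
mv = reg.log_model(model, model_name="MY_MODEL", version_name="V1", sample_input_data=train_df[["X1"]])
predictions = mv.run(train_df[["X1"]], function_name="predict")
```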
@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers
 
 from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.cluster".replace("sklearn.", "").split("_")])
 
 
-
 class MeanShift(BaseTransformer):
     r"""Mean shift clustering using a flat kernel
     For more details on this class, see [sklearn.cluster.MeanShift]
@@ -60,6 +61,49 @@ class MeanShift(BaseTransformer):
 
     Parameters
     ----------
+
+    input_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain features.
+        If this parameter is not specified, all columns in the input DataFrame except
+        the columns specified by label_cols, sample_weight_col, and passthrough_cols
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
+    label_cols: Optional[Union[str, List[str]]]
+        This parameter is optional and will be ignored during fit. It is present here for API consistency by convention.
+
+    output_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that will store the
+        output of predict and transform operations. The length of output_cols must
+        match the expected number of output columns from the specific predictor or
+        transformer class used.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don’t specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        be set explicitly for transformers. Output columns can also be set after
+        initialization with the `set_output_cols` method.
+
+    sample_weight_col: Optional[str]
+        A string representing the column name containing the sample weights.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.
+
+    passthrough_cols: Optional[Union[str, List[str]]]
+        A string or a list of strings indicating column names to be excluded from any
+        operations (such as train, transform, or inference). These specified column(s)
+        will remain untouched throughout the process. This option is helpful in scenarios
+        requiring automatic input_cols inference, but need to avoid using specific
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.
+
+    drop_input_cols: Optional[bool], default=False
+        If set, the response of predict(), transform() methods will not contain input columns.
+
     bandwidth: float, default=None
         Bandwidth used in the flat kernel.
 
@@ -109,35 +153,6 @@ class MeanShift(BaseTransformer):
     max_iter: int, default=300
         Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if has not converged yet.
-
-    input_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain features.
-        If this parameter is not specified, all columns in the input DataFrame except
-        the columns specified by label_cols and sample_weight_col parameters are
-        considered input columns.
-
-    label_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain labels.
-        This is a required param for estimators, as there is no way to infer these
-        columns. If this parameter is not specified, then object is fitted without
-        labels (like a transformer).
-
-    output_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that will store the
-        output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific estimator or
-        transformer class used.
-        If this parameter is not specified, output column names are derived by
-        adding an OUTPUT_ prefix to the label column names. These inferred output
-        column names work for estimator's predict() method, but output_cols must
-        be set explicitly for transformers.
-
-    sample_weight_col: Optional[str]
-        A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
-
-    drop_input_cols: Optional[bool], default=False
-        If set, the response of predict(), transform() methods will not contain input columns.
     """
 
     def __init__( # type: ignore[no-untyped-def]
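The two docstring hunks above replace the old column-handling documentation with the new `passthrough_cols`-aware wording. A minimal usage sketch of these parameters on the regenerated estimator; `df` and the "INDEX" column are hypothetical:

```python
# Usage sketch for the column-handling parameters documented above.
# `df` is assumed to be a Snowpark (or pandas) DataFrame; "INDEX" is a hypothetical
# identifier column that should be carried through but never used as a feature.
from snowflake.ml.modeling.cluster import MeanShift

ms = MeanShift(
    output_cols=["CLUSTER"],       # explicit output column name
    passthrough_cols=["INDEX"],    # excluded from input_cols inference, left untouched
    drop_input_cols=False,
)
ms.fit(df)               # input_cols inferred as all columns except label/sample-weight/passthrough cols
result = ms.predict(df)  # result keeps "INDEX" and the inputs, plus the "CLUSTER" output column
```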
@@ -153,6 +168,7 @@ class MeanShift(BaseTransformer):
         input_cols: Optional[Union[str, Iterable[str]]] = None,
         output_cols: Optional[Union[str, Iterable[str]]] = None,
         label_cols: Optional[Union[str, Iterable[str]]] = None,
+        passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
         drop_input_cols: Optional[bool] = False,
         sample_weight_col: Optional[str] = None,
     ) -> None:
@@ -161,9 +177,10 @@ class MeanShift(BaseTransformer):
         self.set_input_cols(input_cols)
         self.set_output_cols(output_cols)
         self.set_label_cols(label_cols)
+        self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])
 
         self._deps = list(deps)
 
@@ -178,13 +195,14 @@ class MeanShift(BaseTransformer):
             args=init_args,
             klass=sklearn.cluster.MeanShift
         )
-        self._sklearn_object = sklearn.cluster.MeanShift(
+        self._sklearn_object: Any = sklearn.cluster.MeanShift(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=MeanShift.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=MeanShift.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True
 
     def _get_rand_id(self) -> str:
         """
@@ -195,24 +213,6 @@ class MeanShift(BaseTransformer):
         """
         return str(uuid4()).replace("-", "_").upper()
 
-    def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
-        """
-        Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
-
-        Args:
-            dataset: Input dataset.
-        """
-        if not self.input_cols:
-            cols = [
-                c for c in dataset.columns
-                if c not in self.get_label_cols() and c != self.sample_weight_col
-            ]
-            self.set_input_cols(input_cols=cols)
-
-        if not self.output_cols:
-            cols = [identifier.concat_names(ids=['OUTPUT_', c]) for c in self.label_cols]
-            self.set_output_cols(output_cols=cols)
-
     def set_input_cols(self, input_cols: Optional[Union[str, Iterable[str]]]) -> "MeanShift":
         """
         Input columns setter.
@@ -258,54 +258,48 @@ class MeanShift(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-
-
-
-
-
-        self.
-
-
-
-
-
-
-
-
-
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+            # If we are already in a stored procedure, no need to kick off another one.
+            if SNOWML_SPROC_ENV in os.environ:
+                statement_params = telemetry.get_function_usage_statement_params(
+                    project=_PROJECT,
+                    subproject=_SUBPROJECT,
+                    function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), MeanShift.__class__.__name__),
+                    api_calls=[Session.call],
+                    custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+                )
+                pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+                pd_df.columns = dataset.columns
+                dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self
 
-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
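The rewritten `fit` above no longer calls `_fit_snowpark` or the handlers' fit methods; it builds a trainer via `ModelTrainerBuilder.build` (see the new `model_trainer.py`, `model_trainer_builder.py`, `pandas_trainer.py`, and `snowpark_trainer.py` in the file list) and calls `train()`. A rough, self-contained sketch of the trainer shape this implies; everything other than the `train()` protocol method is illustrative, not the package's implementation:

```python
# Illustrative stand-in for the trainer abstraction used by the new fit(); the real
# ModelTrainerBuilder dispatches to pandas_trainer.py / snowpark_trainer.py, which are
# not reproduced here.
from typing import Any, List, Optional, Protocol

import pandas as pd


class ModelTrainer(Protocol):
    """Shape implied by model_trainer.py: a trainer only has to return a fitted estimator."""

    def train(self) -> Any: ...


class LocalPandasTrainer:
    """Minimal in-memory trainer: fits the wrapped estimator on a pandas DataFrame."""

    def __init__(
        self,
        estimator: Any,
        dataset: pd.DataFrame,
        input_cols: List[str],
        label_cols: List[str],
        sample_weight_col: Optional[str] = None,
    ) -> None:
        self.estimator = estimator
        self.dataset = dataset
        self.input_cols = input_cols
        self.label_cols = label_cols
        self.sample_weight_col = sample_weight_col

    def train(self) -> Any:
        fit_kwargs = {}
        if self.label_cols:
            fit_kwargs["y"] = self.dataset[self.label_cols].squeeze()
        if self.sample_weight_col is not None:
            fit_kwargs["sample_weight"] = self.dataset[self.sample_weight_col]
        # Unsupervised estimators such as MeanShift simply get no y/sample_weight here.
        return self.estimator.fit(self.dataset[self.input_cols], **fit_kwargs)
```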
@@ -493,11 +487,6 @@ class MeanShift(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Predict the closest cluster each sample in X belongs to
         For more details on this function, see [sklearn.cluster.MeanShift.predict]
@@ -551,11 +540,6 @@ class MeanShift(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.
 
@@ -614,7 +598,8 @@ class MeanShift(BaseTransformer):
         if True:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels : npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError
 
@@ -650,6 +635,7 @@ class MeanShift(BaseTransformer):
             output_cols = []
 
         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
 
         return rv
@@ -660,11 +646,6 @@ class MeanShift(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -705,11 +686,6 @@ class MeanShift(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -746,16 +722,6 @@ class MeanShift(BaseTransformer):
         return output_df
 
     @available_if(original_estimator_has_callable("decision_function")) # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -854,11 +820,6 @@ class MeanShift(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
@@ -918,18 +879,28 @@ class MeanShift(BaseTransformer):
         # For classifier, the type of predict is the same as the type of label
         if self._sklearn_object._estimator_type == 'classifier':
             # label columns is the desired type for output
-            outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+            outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
             # rename the output columns
-            outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+            outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
+            self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                    ([] if self._drop_input_cols else inputs)
+                                                                    + outputs)
+        # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
+        # For outlier models, returns -1 for outliers and 1 for inliers.
+        # Clusterer returns int64 cluster labels.
+        elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
+            outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                     ([] if self._drop_input_cols else inputs)
                                                                     + outputs)
+
         # For regressor, the type of predict is float64
         elif self._sklearn_object._estimator_type == 'regressor':
             outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                     ([] if self._drop_input_cols else inputs)
                                                                     + outputs)
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
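The signature-inference hunk above builds `ModelSignature` objects from `FeatureSpec`/`DataType` in `snowflake.ml.model.model_signature` (itself changed in this release, +108 -53). A small standalone sketch of the same construction, with placeholder column names:

```python
# Standalone sketch of the signature objects the hunk above constructs; the column
# names here are placeholders.
from snowflake.ml.model.model_signature import DataType, FeatureSpec, ModelSignature

inputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in ("FEATURE_1", "FEATURE_2")]
outputs = [FeatureSpec(dtype=DataType.INT64, name="OUTPUT_0")]  # clusterers emit int64 labels

predict_signature = ModelSignature(inputs=inputs, outputs=outputs)
print(predict_signature.to_dict())  # serializable form recorded in the model metadata
```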