snowflake-ml-python 1.1.1__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- snowflake/cortex/_complete.py +1 -1
- snowflake/cortex/_extract_answer.py +1 -1
- snowflake/cortex/_sentiment.py +1 -1
- snowflake/cortex/_summarize.py +1 -1
- snowflake/cortex/_translate.py +1 -1
- snowflake/ml/_internal/env_utils.py +68 -6
- snowflake/ml/_internal/file_utils.py +34 -4
- snowflake/ml/_internal/telemetry.py +79 -91
- snowflake/ml/_internal/utils/retryable_http.py +16 -4
- snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
- snowflake/ml/dataset/dataset.py +1 -1
- snowflake/ml/model/_api.py +21 -14
- snowflake/ml/model/_client/model/model_impl.py +176 -0
- snowflake/ml/model/_client/model/model_method_info.py +19 -0
- snowflake/ml/model/_client/model/model_version_impl.py +291 -0
- snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
- snowflake/ml/model/_client/ops/model_ops.py +308 -0
- snowflake/ml/model/_client/sql/model.py +75 -0
- snowflake/ml/model/_client/sql/model_version.py +213 -0
- snowflake/ml/model/_client/sql/stage.py +40 -0
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
- snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
- snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
- snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
- snowflake/ml/model/_model_composer/model_composer.py +31 -9
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
- snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
- snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
- snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
- snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
- snowflake/ml/model/model_signature.py +108 -53
- snowflake/ml/model/type_hints.py +1 -0
- snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
- snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
- snowflake/ml/modeling/_internal/model_specifications.py +146 -0
- snowflake/ml/modeling/_internal/model_trainer.py +13 -0
- snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
- snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
- snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
- snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +96 -124
- snowflake/ml/modeling/cluster/affinity_propagation.py +94 -124
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +94 -124
- snowflake/ml/modeling/cluster/birch.py +94 -124
- snowflake/ml/modeling/cluster/bisecting_k_means.py +94 -124
- snowflake/ml/modeling/cluster/dbscan.py +94 -124
- snowflake/ml/modeling/cluster/feature_agglomeration.py +94 -124
- snowflake/ml/modeling/cluster/k_means.py +93 -124
- snowflake/ml/modeling/cluster/mean_shift.py +94 -124
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +93 -124
- snowflake/ml/modeling/cluster/optics.py +94 -124
- snowflake/ml/modeling/cluster/spectral_biclustering.py +94 -124
- snowflake/ml/modeling/cluster/spectral_clustering.py +94 -124
- snowflake/ml/modeling/cluster/spectral_coclustering.py +94 -124
- snowflake/ml/modeling/compose/column_transformer.py +94 -124
- snowflake/ml/modeling/compose/transformed_target_regressor.py +96 -124
- snowflake/ml/modeling/covariance/elliptic_envelope.py +94 -124
- snowflake/ml/modeling/covariance/empirical_covariance.py +80 -110
- snowflake/ml/modeling/covariance/graphical_lasso.py +94 -124
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +94 -124
- snowflake/ml/modeling/covariance/ledoit_wolf.py +85 -115
- snowflake/ml/modeling/covariance/min_cov_det.py +94 -124
- snowflake/ml/modeling/covariance/oas.py +80 -110
- snowflake/ml/modeling/covariance/shrunk_covariance.py +84 -114
- snowflake/ml/modeling/decomposition/dictionary_learning.py +94 -124
- snowflake/ml/modeling/decomposition/factor_analysis.py +94 -124
- snowflake/ml/modeling/decomposition/fast_ica.py +94 -124
- snowflake/ml/modeling/decomposition/incremental_pca.py +94 -124
- snowflake/ml/modeling/decomposition/kernel_pca.py +94 -124
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +94 -124
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +94 -124
- snowflake/ml/modeling/decomposition/pca.py +94 -124
- snowflake/ml/modeling/decomposition/sparse_pca.py +94 -124
- snowflake/ml/modeling/decomposition/truncated_svd.py +94 -124
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +96 -124
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +91 -119
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +96 -124
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +96 -124
- snowflake/ml/modeling/ensemble/bagging_classifier.py +96 -124
- snowflake/ml/modeling/ensemble/bagging_regressor.py +96 -124
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +96 -124
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +96 -124
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +96 -124
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +96 -124
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +96 -124
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +96 -124
- snowflake/ml/modeling/ensemble/isolation_forest.py +94 -124
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +96 -124
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +96 -124
- snowflake/ml/modeling/ensemble/stacking_regressor.py +96 -124
- snowflake/ml/modeling/ensemble/voting_classifier.py +96 -124
- snowflake/ml/modeling/ensemble/voting_regressor.py +91 -119
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +82 -110
- snowflake/ml/modeling/feature_selection/select_fdr.py +80 -108
- snowflake/ml/modeling/feature_selection/select_fpr.py +80 -108
- snowflake/ml/modeling/feature_selection/select_fwe.py +80 -108
- snowflake/ml/modeling/feature_selection/select_k_best.py +81 -109
- snowflake/ml/modeling/feature_selection/select_percentile.py +80 -108
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +94 -124
- snowflake/ml/modeling/feature_selection/variance_threshold.py +76 -106
- snowflake/ml/modeling/framework/base.py +2 -2
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +96 -124
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +96 -124
- snowflake/ml/modeling/impute/iterative_imputer.py +94 -124
- snowflake/ml/modeling/impute/knn_imputer.py +94 -124
- snowflake/ml/modeling/impute/missing_indicator.py +94 -124
- snowflake/ml/modeling/impute/simple_imputer.py +1 -1
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +77 -107
- snowflake/ml/modeling/kernel_approximation/nystroem.py +94 -124
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +94 -124
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +86 -116
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +84 -114
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +96 -124
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +71 -100
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +71 -100
- snowflake/ml/modeling/linear_model/ard_regression.py +96 -124
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +96 -124
- snowflake/ml/modeling/linear_model/elastic_net.py +96 -124
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +96 -124
- snowflake/ml/modeling/linear_model/gamma_regressor.py +96 -124
- snowflake/ml/modeling/linear_model/huber_regressor.py +96 -124
- snowflake/ml/modeling/linear_model/lars.py +96 -124
- snowflake/ml/modeling/linear_model/lars_cv.py +96 -124
- snowflake/ml/modeling/linear_model/lasso.py +96 -124
- snowflake/ml/modeling/linear_model/lasso_cv.py +96 -124
- snowflake/ml/modeling/linear_model/lasso_lars.py +96 -124
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +96 -124
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +96 -124
- snowflake/ml/modeling/linear_model/linear_regression.py +91 -119
- snowflake/ml/modeling/linear_model/logistic_regression.py +96 -124
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +96 -124
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +96 -124
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +96 -124
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +96 -124
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +96 -124
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +96 -124
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +96 -124
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +95 -124
- snowflake/ml/modeling/linear_model/perceptron.py +95 -124
- snowflake/ml/modeling/linear_model/poisson_regressor.py +96 -124
- snowflake/ml/modeling/linear_model/ransac_regressor.py +96 -124
- snowflake/ml/modeling/linear_model/ridge.py +96 -124
- snowflake/ml/modeling/linear_model/ridge_classifier.py +96 -124
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +96 -124
- snowflake/ml/modeling/linear_model/ridge_cv.py +96 -124
- snowflake/ml/modeling/linear_model/sgd_classifier.py +96 -124
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +94 -124
- snowflake/ml/modeling/linear_model/sgd_regressor.py +96 -124
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +96 -124
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +96 -124
- snowflake/ml/modeling/manifold/isomap.py +94 -124
- snowflake/ml/modeling/manifold/mds.py +94 -124
- snowflake/ml/modeling/manifold/spectral_embedding.py +94 -124
- snowflake/ml/modeling/manifold/tsne.py +94 -124
- snowflake/ml/modeling/metrics/classification.py +187 -52
- snowflake/ml/modeling/metrics/correlation.py +4 -2
- snowflake/ml/modeling/metrics/covariance.py +7 -4
- snowflake/ml/modeling/metrics/ranking.py +32 -16
- snowflake/ml/modeling/metrics/regression.py +60 -32
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +94 -124
- snowflake/ml/modeling/mixture/gaussian_mixture.py +94 -124
- snowflake/ml/modeling/model_selection/grid_search_cv.py +88 -138
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +90 -144
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +86 -114
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +93 -121
- snowflake/ml/modeling/multiclass/output_code_classifier.py +94 -122
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +92 -120
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +96 -124
- snowflake/ml/modeling/naive_bayes/complement_nb.py +92 -120
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +79 -107
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +88 -116
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +96 -124
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +96 -124
- snowflake/ml/modeling/neighbors/kernel_density.py +94 -124
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +94 -124
- snowflake/ml/modeling/neighbors/nearest_centroid.py +89 -117
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +94 -124
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +96 -124
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +96 -124
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +96 -124
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +94 -124
- snowflake/ml/modeling/neural_network/mlp_classifier.py +96 -124
- snowflake/ml/modeling/neural_network/mlp_regressor.py +96 -124
- snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
- snowflake/ml/modeling/preprocessing/binarizer.py +14 -9
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +0 -4
- snowflake/ml/modeling/preprocessing/label_encoder.py +21 -13
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +20 -14
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +35 -19
- snowflake/ml/modeling/preprocessing/normalizer.py +6 -9
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +20 -13
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +25 -13
- snowflake/ml/modeling/preprocessing/polynomial_features.py +94 -124
- snowflake/ml/modeling/preprocessing/robust_scaler.py +28 -14
- snowflake/ml/modeling/preprocessing/standard_scaler.py +25 -13
- snowflake/ml/modeling/semi_supervised/label_propagation.py +96 -124
- snowflake/ml/modeling/semi_supervised/label_spreading.py +96 -124
- snowflake/ml/modeling/svm/linear_svc.py +96 -124
- snowflake/ml/modeling/svm/linear_svr.py +96 -124
- snowflake/ml/modeling/svm/nu_svc.py +96 -124
- snowflake/ml/modeling/svm/nu_svr.py +96 -124
- snowflake/ml/modeling/svm/svc.py +96 -124
- snowflake/ml/modeling/svm/svr.py +96 -124
- snowflake/ml/modeling/tree/decision_tree_classifier.py +96 -124
- snowflake/ml/modeling/tree/decision_tree_regressor.py +96 -124
- snowflake/ml/modeling/tree/extra_tree_classifier.py +96 -124
- snowflake/ml/modeling/tree/extra_tree_regressor.py +96 -124
- snowflake/ml/modeling/xgboost/xgb_classifier.py +96 -125
- snowflake/ml/modeling/xgboost/xgb_regressor.py +96 -125
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +96 -125
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +96 -125
- snowflake/ml/registry/model_registry.py +2 -0
- snowflake/ml/registry/registry.py +215 -0
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.1.1.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +21 -3
- snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
- snowflake_ml_python-1.1.1.dist-info/RECORD +0 -331
- {snowflake_ml_python-1.1.1.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
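The headline addition in 1.1.2 is the new model registry client: `snowflake/ml/registry/registry.py` (+215 lines) plus the `snowflake/ml/model/_client/` package above. The diff lists only file names, not signatures, so the following usage sketch is an assumption for orientation: every argument and method name here is hypothetical except the module path, and `session` is presumed to be an existing Snowpark session.

```python
from snowflake.ml.registry import Registry  # new module in 1.1.2 (registry.py, +215 lines)

# All names below are illustrative assumptions, not confirmed signatures.
reg = Registry(session=session, database_name="ML_DB", schema_name="MODELS")
model_version = reg.log_model(
    my_fitted_model,            # hypothetical fitted model object
    model_name="MY_MODEL",
    version_name="V1",
)
predictions = model_version.run(test_df)  # hypothetical inference call
```

The excerpts below show a representative subset of the changes, drawn from the preprocessing estimators.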
snowflake/ml/modeling/preprocessing/one_hot_encoder.py

```diff
@@ -101,7 +101,7 @@ class OneHotEncoder(base.BaseTransformer):
     (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html).

     Args:
-        categories: 'auto' or dict {column_name: ndarray([category])}, default='auto'
+        categories: 'auto' or dict {column_name: np.ndarray([category])}, default='auto'
             Categories (unique values) per feature:
             - 'auto': Determine categories automatically from the training data.
             - dict: ``categories[column_name]`` holds the categories expected in
```
```diff
@@ -109,6 +109,7 @@ class OneHotEncoder(base.BaseTransformer):
             and numeric values within a single feature, and should be sorted in
             case of numeric values.
             The used categories can be found in the ``categories_`` attribute.
+
         drop: {'first', 'if_binary'} or an array-like of shape (n_features,), default=None
             Specifies a methodology to use to drop one of the categories per
             feature. This is useful in situations where perfectly collinear
```
```diff
@@ -128,15 +129,18 @@ class OneHotEncoder(base.BaseTransformer):
             When `max_categories` or `min_frequency` is configured to group
             infrequent categories, the dropping behavior is handled after the
             grouping.
+
         sparse: bool, default=False
             Will return a column with sparse representation if set True else will return
             a separate column for each category.
+
         handle_unknown: {'error', 'ignore'}, default='error'
             Specifies the way unknown categories are handled during :meth:`transform`.
             - 'error': Raise an error if an unknown category is present during transform.
             - 'ignore': When an unknown category is encountered during
                 transform, the resulting one-hot encoded columns for this feature
                 will be all zeros.
+
         min_frequency: int or float, default=None
             Specifies the minimum frequency below which a category will be
             considered infrequent.
```
```diff
@@ -144,22 +148,29 @@ class OneHotEncoder(base.BaseTransformer):
             infrequent.
             - If `float`, categories with a smaller cardinality than
                 `min_frequency * n_samples` will be considered infrequent.
+
         max_categories: int, default=None
             Specifies an upper limit to the number of output features for each input
             feature when considering infrequent categories. If there are infrequent
             categories, `max_categories` includes the category representing the
             infrequent categories along with the frequent categories. If `None`,
             there is no limit to the number of output features.
-
+
+        input_cols: Optional[Union[str, List[str]]], default=None
             Single or multiple input columns.
-
+
+        output_cols: Optional[Union[str, List[str]]], default=None
             Single or multiple output columns.
-
-
-
-
-
-
+
+        passthrough_cols: Optional[Union[str, List[str]]]
+            A string or a list of strings indicating column names to be excluded from any
+            operations (such as train, transform, or inference). These specified column(s)
+            will remain untouched throughout the process. This option is helpful in scenarios
+            requiring automatic input_cols inference, but need to avoid using specific
+            columns, like index columns, during training or inference.
+
+        drop_input_cols: Optional[Union[str, List[str]]]
+            Remove input columns from output if set True. False by default.

     Attributes:
         categories_: dict {column_name: ndarray([category])}
```
```diff
@@ -665,10 +676,6 @@ class OneHotEncoder(base.BaseTransformer):
         project=base.PROJECT,
         subproject=base.SUBPROJECT,
     )
-    @telemetry.add_stmt_params_to_df(
-        project=base.PROJECT,
-        subproject=base.SUBPROJECT,
-    )
     def transform(
         self, dataset: Union[snowpark.DataFrame, pd.DataFrame]
     ) -> Union[snowpark.DataFrame, pd.DataFrame, sparse.csr_matrix]:
```
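Taken together, the docstring edits above spell out the encoder's public surface. A minimal usage sketch built only from the parameters documented in this diff, assuming a hypothetical DataFrame `df` with `COLOR` and `ROW_ID` columns:

```python
import numpy as np
from snowflake.ml.modeling.preprocessing import OneHotEncoder

# Categories supplied explicitly as dict {column_name: np.ndarray([...])},
# per the corrected docstring; 'auto' would infer them from the data.
enc = OneHotEncoder(
    categories={"COLOR": np.array(["blue", "green", "red"])},
    handle_unknown="ignore",      # unknown categories encode as all zeros
    input_cols=["COLOR"],
    output_cols=["COLOR_OHE"],
    passthrough_cols=["ROW_ID"],  # e.g. an index column left untouched
    drop_input_cols=True,
)
enc.fit(df)                       # df: snowpark.DataFrame or pandas.DataFrame
transformed = enc.transform(df)
```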
snowflake/ml/modeling/preprocessing/ordinal_encoder.py

```diff
@@ -45,31 +45,47 @@ class OrdinalEncoder(base.BaseTransformer):
     (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html).

     Args:
-        categories:
+        categories: Union[str, Dict[str, type_utils.LiteralNDArrayType]], default="auto"
+            The string 'auto' (the default) causes the categories to be extracted from the input columns.
             To specify the categories yourself, pass a dictionary mapping the column name to an ndarray containing the
             categories.
-
+
+        handle_unknown: str, default="error"
+            Specifies how unknown categories are handled during transformation. Applicable only if
             categories is not 'auto'.
             Valid values are:
             - 'error': Raise an error if an unknown category is present during transform (default).
             - 'use_encoded_value': When an unknown category is encountered during transform, the specified
                 encoded_missing_value (below) is used.
-
+
+        unknown_value: Optional[Union[int, float]], default=None
+            When the parameter handle_unknown is set to 'use_encoded_value', this parameter is required and
             will set the encoded value of unknown categories. It has to be distinct from the values used to encode any
             of the categories in `fit`.
-
-
-
+
+        encoded_missing_value: Union[int, float], default=np.nan
+            The value to be used to encode unknown categories.
+
+        input_cols: Optional[Union[str, List[str]]], default=None
+            The name(s) of one or more columns in a DataFrame containing a feature to be encoded.
+
+        output_cols: Optional[Union[str, List[str]]], default=None
+            The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
             columns specified must match the number of input columns.
-
+
+        passthrough_cols: Optional[Union[str, List[str]]], default=None
+            A string or a list of strings indicating column names to be excluded from any
             operations (such as train, transform, or inference). These specified column(s)
             will remain untouched throughout the process. This option is helpful in scenarios
             requiring automatic input_cols inference, but need to avoid using specific
             columns, like index columns, during training or inference.
-
+
+        drop_input_cols: Optional[bool], default=False
+            Remove input columns from output if set True. False by default.

     Attributes:
-        categories_ (dict of ndarray):
+        categories_ (dict of ndarray): List[type_utils.LiteralNDArrayType]
+            The categories of each feature determined during fitting. Maps input column
             names to an array of the detected categories.
             Attributes are valid only after fit() has been called.
     """
```
```diff
@@ -429,10 +445,6 @@ class OrdinalEncoder(base.BaseTransformer):
         project=base.PROJECT,
         subproject=base.SUBPROJECT,
     )
-    @telemetry.add_stmt_params_to_df(
-        project=base.PROJECT,
-        subproject=base.SUBPROJECT,
-    )
     def transform(self, dataset: Union[snowpark.DataFrame, pd.DataFrame]) -> Union[snowpark.DataFrame, pd.DataFrame]:
         """
         Transform dataset to ordinal codes.
```
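A short sketch of the handle_unknown/unknown_value contract documented above, again assuming a hypothetical DataFrame `df` with a `SIZE` column:

```python
import numpy as np
from snowflake.ml.modeling.preprocessing import OrdinalEncoder

# Explicit category order; unknown categories map to -1 instead of raising.
enc = OrdinalEncoder(
    categories={"SIZE": np.array(["small", "medium", "large"])},
    handle_unknown="use_encoded_value",
    unknown_value=-1,              # must differ from every fitted code
    encoded_missing_value=np.nan,
    input_cols=["SIZE"],
    output_cols=["SIZE_CODE"],
)
codes = enc.fit(df).transform(df)  # fit() returns self, per the diff below
print(enc.categories_)             # dict: input column -> ndarray of categories
```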
snowflake/ml/modeling/preprocessing/polynomial_features.py

```diff
@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers

 from snowflake.ml.model.model_signature import (
```
```diff
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.preprocessing".replace("sklearn.", "").split("_")])


-
 class PolynomialFeatures(BaseTransformer):
     r"""Generate polynomial and interaction features
     For more details on this class, see [sklearn.preprocessing.PolynomialFeatures]
```
```diff
@@ -60,6 +61,49 @@ class PolynomialFeatures(BaseTransformer):

     Parameters
     ----------
+
+    input_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain features.
+        If this parameter is not specified, all columns in the input DataFrame except
+        the columns specified by label_cols, sample_weight_col, and passthrough_cols
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
+    label_cols: Optional[Union[str, List[str]]]
+        This parameter is optional and will be ignored during fit. It is present here for API consistency by convention.
+
+    output_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that will store the
+        output of predict and transform operations. The length of output_cols must
+        match the expected number of output columns from the specific predictor or
+        transformer class used.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX> for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don't specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        be set explicitly for transformers. Output columns can also be set after
+        initialization with the `set_output_cols` method.
+
+    sample_weight_col: Optional[str]
+        A string representing the column name containing the sample weights.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.
+
+    passthrough_cols: Optional[Union[str, List[str]]]
+        A string or a list of strings indicating column names to be excluded from any
+        operations (such as train, transform, or inference). These specified column(s)
+        will remain untouched throughout the process. This option is helpful in scenarios
+        requiring automatic input_cols inference, but need to avoid using specific
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.
+
+    drop_input_cols: Optional[bool], default=False
+        If set, the response of predict(), transform() methods will not contain input columns.
+
     degree: int or tuple (min_degree, max_degree), default=2
         If a single int is given, it specifies the maximal degree of the
         polynomial features. If a tuple `(min_degree, max_degree)` is passed,
```
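To make the column-naming contract above concrete: as a transformer, PolynomialFeatures needs output_cols set explicitly (the OUTPUT_ prefix fallback applies to predictors only). A sketch with hypothetical column names; six outputs because degree=2 on two features yields 1, x1, x2, x1², x1·x2, x2² with the bias term included:

```python
from snowflake.ml.modeling.preprocessing import PolynomialFeatures

pf = PolynomialFeatures(
    degree=2,
    input_cols=["X1", "X2"],
    output_cols=["POLY_1", "POLY_2", "POLY_3", "POLY_4", "POLY_5", "POLY_6"],
    drop_input_cols=True,          # response will not contain X1, X2
)
out = pf.fit(df).transform(df)     # df: snowpark or pandas DataFrame
```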
```diff
@@ -84,42 +128,6 @@ class PolynomialFeatures(BaseTransformer):
     order: {'C', 'F'}, default='C'
         Order of output array in the dense case. `'F'` order is faster to
         compute, but may slow down subsequent estimators.
-
-    input_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain features.
-        If this parameter is not specified, all columns in the input DataFrame except
-        the columns specified by label_cols, sample_weight_col, and passthrough_cols
-        parameters are considered input columns.
-
-    label_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain labels.
-        This is a required param for estimators, as there is no way to infer these
-        columns. If this parameter is not specified, then object is fitted without
-        labels (like a transformer).
-
-    output_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that will store the
-        output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific estimator or
-        transformer class used.
-        If this parameter is not specified, output column names are derived by
-        adding an OUTPUT_ prefix to the label column names. These inferred output
-        column names work for estimator's predict() method, but output_cols must
-        be set explicitly for transformers.
-
-    sample_weight_col: Optional[str]
-        A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
-
-    passthrough_cols: Optional[Union[str, List[str]]]
-        A string or a list of strings indicating column names to be excluded from any
-        operations (such as train, transform, or inference). These specified column(s)
-        will remain untouched throughout the process. This option is helpful in scenarios
-        requiring automatic input_cols inference, but need to avoid using specific
-        columns, like index columns, during training or inference.
-
-    drop_input_cols: Optional[bool], default=False
-        If set, the response of predict(), transform() methods will not contain input columns.
     """

     def __init__(  # type: ignore[no-untyped-def]
```
```diff
@@ -144,7 +152,7 @@ class PolynomialFeatures(BaseTransformer):
         self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])

         self._deps = list(deps)

```
```diff
@@ -156,13 +164,14 @@ class PolynomialFeatures(BaseTransformer):
             args=init_args,
             klass=sklearn.preprocessing.PolynomialFeatures
         )
-        self._sklearn_object = sklearn.preprocessing.PolynomialFeatures(
+        self._sklearn_object: Any = sklearn.preprocessing.PolynomialFeatures(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=PolynomialFeatures.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=PolynomialFeatures.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True

     def _get_rand_id(self) -> str:
         """
```
```diff
@@ -218,54 +227,48 @@ class PolynomialFeatures(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-
-
-
-
-
-            self.
-
-
-
-
-
-
-
-
-
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None  # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+            # If we are already in a stored procedure, no need to kick off another one.
+            if SNOWML_SPROC_ENV in os.environ:
+                statement_params = telemetry.get_function_usage_statement_params(
+                    project=_PROJECT,
+                    subproject=_SUBPROJECT,
+                    function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), PolynomialFeatures.__class__.__name__),
+                    api_calls=[Session.call],
+                    custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+                )
+                pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+                pd_df.columns = dataset.columns
+                dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self

-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None  # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None  # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
```
(The deleted body of the old `fit()` at lines 221-236 was truncated by the diff-viewer extraction; only the fragments shown survive.)
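The refactor above retires the per-estimator `_fit_snowpark` helper in favor of a shared trainer abstraction: see the new `model_trainer.py` (+13), `pandas_trainer.py` (+54), `snowpark_trainer.py` (+331), and `model_trainer_builder.py` (+78) in the file list. The diff does not show those files' contents, so the following is only a sketch of the dispatch pattern they presumably implement; every class body and parameter below is an assumption, not the actual implementation:

```python
from typing import Any, List, Optional, Protocol, Union

import pandas as pd
from snowflake.snowpark import DataFrame


class ModelTrainer(Protocol):
    # Shared contract (cf. model_trainer.py): every trainer returns the fitted estimator.
    def train(self) -> Any:
        ...


class PandasTrainer:
    # Illustrative stand-in for pandas_trainer.py: fit locally, in-process.
    def __init__(self, estimator: Any, dataset: pd.DataFrame,
                 input_cols: List[str], label_cols: Optional[List[str]] = None) -> None:
        self.estimator = estimator
        self.dataset = dataset
        self.input_cols = input_cols
        self.label_cols = label_cols or []

    def train(self) -> Any:
        X = self.dataset[self.input_cols]
        if self.label_cols:
            return self.estimator.fit(X, self.dataset[self.label_cols].squeeze())
        return self.estimator.fit(X)


class ModelTrainerBuilder:
    # Chooses a trainer from the dataset type, mirroring how fit() above delegates.
    @classmethod
    def build(cls, estimator: Any, dataset: Union[DataFrame, pd.DataFrame],
              input_cols: List[str], label_cols: Optional[List[str]] = None,
              **kwargs: Any) -> ModelTrainer:
        if isinstance(dataset, pd.DataFrame):
            return PandasTrainer(estimator, dataset, input_cols, label_cols)
        # The real Snowpark path serializes the estimator and runs fit inside a
        # Snowflake stored procedure; omitted here (see snowpark_trainer.py).
        raise NotImplementedError("Snowpark path: see snowpark_trainer.py")
```

The design win is that `fit()` in every generated estimator shrinks to data preparation plus one `build(...).train()` call, which is why each of the ~170 modeling files above loses roughly 30 lines.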
```diff
@@ -453,11 +456,6 @@ class PolynomialFeatures(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.

```
```diff
@@ -509,11 +507,6 @@ class PolynomialFeatures(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Transform data to polynomial features
         For more details on this function, see [sklearn.preprocessing.PolynomialFeatures.transform]
```
```diff
@@ -572,7 +565,8 @@ class PolynomialFeatures(BaseTransformer):
         if False:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels: npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError

```
```diff
@@ -608,6 +602,7 @@ class PolynomialFeatures(BaseTransformer):
             output_cols = []

         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None  # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]

         return rv
```
```diff
@@ -618,11 +613,6 @@ class PolynomialFeatures(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
```
```diff
@@ -663,11 +653,6 @@ class PolynomialFeatures(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
```
```diff
@@ -704,16 +689,6 @@ class PolynomialFeatures(BaseTransformer):
         return output_df

     @available_if(original_estimator_has_callable("decision_function"))  # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
```
```diff
@@ -812,11 +787,6 @@ class PolynomialFeatures(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
```
```diff
@@ -876,9 +846,9 @@ class PolynomialFeatures(BaseTransformer):
         # For classifier, the type of predict is the same as the type of label
         if self._sklearn_object._estimator_type == 'classifier':
             # label columns is the desired type for output
-            outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+            outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
             # rename the output columns
-            outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+            outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                    ([] if self._drop_input_cols else inputs)
                                                                    + outputs)
```
snowflake/ml/modeling/preprocessing/robust_scaler.py

```diff
@@ -20,28 +20,46 @@ class RobustScaler(base.BaseTransformer):
     (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html).

     Args:
-        with_centering:
-
-
+        with_centering: bool, default=True
+            If True, center the data around zero before scaling.
+
+        with_scaling: bool, default=True
+            If True, scale the data to interquartile range.
+
+        quantile_range: Tuple[float, float], default=(25.0, 75.0)
+            tuple like (q_min, q_max), where 0.0 < q_min < q_max < 100.0, default=(25.0, 75.0). Quantile
             range used to calculate scale_. By default, this is equal to the IQR, i.e., q_min is the first quantile and
             q_max is the third quantile.
-
+
+        unit_variance: bool, default=False
+            If True, scale data so that normally-distributed features have a variance of 1. In general, if
             the difference between the x-values of q_max and q_min for a standard normal distribution is greater than 1,
             the dataset is scaled down. If less than 1, the dataset is scaled up.
-
-
+
+        input_cols: Optional[Union[str, List[str]]], default=None
+            The name(s) of one or more columns in a DataFrame containing a feature to be scaled.
+
+        output_cols: Optional[Union[str, List[str]]], default=None
+            The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
             columns specified must match the number of input columns. For dense output, the column names specified are
             used as base names for the columns created for each category.
-
+
+        passthrough_cols: Optional[Union[str, List[str]]], default=None
+            A string or a list of strings indicating column names to be excluded from any
             operations (such as train, transform, or inference). These specified column(s)
             will remain untouched throughout the process. This option is helpful in scenarios
             requiring automatic input_cols inference, but need to avoid using specific
             columns, like index columns, during training or inference.
-
+
+        drop_input_cols: Optional[bool], default=False
+            Remove input columns from output if set True. False by default.

     Attributes:
-        center_:
-
+        center_: Dict[str, float]
+            Dictionary mapping input column name to the median value for that feature.
+
+        scale_: Dict[str, float]
+            Dictionary mapping input column name to the (scaled) interquartile range for that feature.
     """

     def __init__(
```
```diff
@@ -199,10 +217,6 @@ class RobustScaler(base.BaseTransformer):
         project=base.PROJECT,
         subproject=base.SUBPROJECT,
     )
-    @telemetry.add_stmt_params_to_df(
-        project=base.PROJECT,
-        subproject=base.SUBPROJECT,
-    )
     def transform(self, dataset: Union[snowpark.DataFrame, pd.DataFrame]) -> Union[snowpark.DataFrame, pd.DataFrame]:
         """
         Center and scale the data.
```
snowflake/ml/modeling/preprocessing/standard_scaler.py

```diff
@@ -19,24 +19,40 @@ class StandardScaler(base.BaseTransformer):
     (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html).

     Args:
-        with_mean:
-
-
-
+        with_mean: bool, default=True
+            If True, center the data before scaling.
+
+        with_std: bool, default=True
+            If True, scale the data unit variance (i.e. unit standard deviation).
+
+        input_cols: Optional[Union[str, List[str]]], default=None
+            The name(s) of one or more columns in a DataFrame containing a feature to be scaled.
+
+        output_cols: Optional[Union[str, List[str]]], default=None
+            The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
             columns specified must match the number of input columns.
-
+
+        passthrough_cols: Optional[Union[str, List[str]]], default=None
+            A string or a list of strings indicating column names to be excluded from any
             operations (such as train, transform, or inference). These specified column(s)
             will remain untouched throughout the process. This option is helpful in scenarios
             requiring automatic input_cols inference, but need to avoid using specific
             columns, like index columns, during training or inference.
-
+
+        drop_input_cols: Optional[bool], default=False
+            Remove input columns from output if set True. False by default.

     Attributes:
-        scale_:
+        scale_: Optional[Dict[str, float]] = {}
+            Dictionary mapping input column names to relative scaling factor to achieve zero mean and unit variance.
             If a variance is zero, unit variance could not be achieved, and the data is left as-is, giving a scaling
             factor of 1. None if with_std is False.
-
-
+
+        mean_: Optional[Dict[str, float]] = {}
+            Dictionary mapping input column name to the mean value for that feature. None if with_mean is False.
+
+        var_: Optional[Dict[str, float]] = {}
+            Dictionary mapping input column name to the variance for that feature. Used to compute scale_. None if
             with_std is False
     """

```
```diff
@@ -177,10 +193,6 @@ class StandardScaler(base.BaseTransformer):
         project=base.PROJECT,
         subproject=base.SUBPROJECT,
     )
-    @telemetry.add_stmt_params_to_df(
-        project=base.PROJECT,
-        subproject=base.SUBPROJECT,
-    )
     def transform(self, dataset: Union[snowpark.DataFrame, pd.DataFrame]) -> Union[snowpark.DataFrame, pd.DataFrame]:
         """
         Perform standardization by centering and scaling.
```
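Per the attribute docs above, the fitted statistics are plain per-column dictionaries, related by scale_ = sqrt(var_). The underlying arithmetic in numpy (values hypothetical):

```python
import numpy as np

x = np.array([2.0, 4.0, 6.0, 8.0])
mean = x.mean()                     # -> 5.0, stored per column in mean_
var = x.var()                       # -> 5.0, stored per column in var_
scale = np.sqrt(var)                # -> ~2.236, stored per column in scale_
standardized = (x - mean) / scale   # zero mean, unit variance
print(mean, var, scale, standardized)
```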
|