snowflake-ml-python 1.1.1__py3-none-any.whl → 1.1.2__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (224)
  1. snowflake/cortex/_complete.py +1 -1
  2. snowflake/cortex/_extract_answer.py +1 -1
  3. snowflake/cortex/_sentiment.py +1 -1
  4. snowflake/cortex/_summarize.py +1 -1
  5. snowflake/cortex/_translate.py +1 -1
  6. snowflake/ml/_internal/env_utils.py +68 -6
  7. snowflake/ml/_internal/file_utils.py +34 -4
  8. snowflake/ml/_internal/telemetry.py +79 -91
  9. snowflake/ml/_internal/utils/retryable_http.py +16 -4
  10. snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
  11. snowflake/ml/dataset/dataset.py +1 -1
  12. snowflake/ml/model/_api.py +21 -14
  13. snowflake/ml/model/_client/model/model_impl.py +176 -0
  14. snowflake/ml/model/_client/model/model_method_info.py +19 -0
  15. snowflake/ml/model/_client/model/model_version_impl.py +291 -0
  16. snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
  17. snowflake/ml/model/_client/ops/model_ops.py +308 -0
  18. snowflake/ml/model/_client/sql/model.py +75 -0
  19. snowflake/ml/model/_client/sql/model_version.py +213 -0
  20. snowflake/ml/model/_client/sql/stage.py +40 -0
  21. snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
  22. snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
  23. snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
  24. snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
  25. snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
  26. snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
  27. snowflake/ml/model/_model_composer/model_composer.py +31 -9
  28. snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
  29. snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
  30. snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
  31. snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
  32. snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
  33. snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
  34. snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
  35. snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
  36. snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
  37. snowflake/ml/model/model_signature.py +108 -53
  38. snowflake/ml/model/type_hints.py +1 -0
  39. snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
  40. snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
  41. snowflake/ml/modeling/_internal/model_specifications.py +146 -0
  42. snowflake/ml/modeling/_internal/model_trainer.py +13 -0
  43. snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
  44. snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
  45. snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
  46. snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
  47. snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +96 -124
  48. snowflake/ml/modeling/cluster/affinity_propagation.py +94 -124
  49. snowflake/ml/modeling/cluster/agglomerative_clustering.py +94 -124
  50. snowflake/ml/modeling/cluster/birch.py +94 -124
  51. snowflake/ml/modeling/cluster/bisecting_k_means.py +94 -124
  52. snowflake/ml/modeling/cluster/dbscan.py +94 -124
  53. snowflake/ml/modeling/cluster/feature_agglomeration.py +94 -124
  54. snowflake/ml/modeling/cluster/k_means.py +93 -124
  55. snowflake/ml/modeling/cluster/mean_shift.py +94 -124
  56. snowflake/ml/modeling/cluster/mini_batch_k_means.py +93 -124
  57. snowflake/ml/modeling/cluster/optics.py +94 -124
  58. snowflake/ml/modeling/cluster/spectral_biclustering.py +94 -124
  59. snowflake/ml/modeling/cluster/spectral_clustering.py +94 -124
  60. snowflake/ml/modeling/cluster/spectral_coclustering.py +94 -124
  61. snowflake/ml/modeling/compose/column_transformer.py +94 -124
  62. snowflake/ml/modeling/compose/transformed_target_regressor.py +96 -124
  63. snowflake/ml/modeling/covariance/elliptic_envelope.py +94 -124
  64. snowflake/ml/modeling/covariance/empirical_covariance.py +80 -110
  65. snowflake/ml/modeling/covariance/graphical_lasso.py +94 -124
  66. snowflake/ml/modeling/covariance/graphical_lasso_cv.py +94 -124
  67. snowflake/ml/modeling/covariance/ledoit_wolf.py +85 -115
  68. snowflake/ml/modeling/covariance/min_cov_det.py +94 -124
  69. snowflake/ml/modeling/covariance/oas.py +80 -110
  70. snowflake/ml/modeling/covariance/shrunk_covariance.py +84 -114
  71. snowflake/ml/modeling/decomposition/dictionary_learning.py +94 -124
  72. snowflake/ml/modeling/decomposition/factor_analysis.py +94 -124
  73. snowflake/ml/modeling/decomposition/fast_ica.py +94 -124
  74. snowflake/ml/modeling/decomposition/incremental_pca.py +94 -124
  75. snowflake/ml/modeling/decomposition/kernel_pca.py +94 -124
  76. snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +94 -124
  77. snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +94 -124
  78. snowflake/ml/modeling/decomposition/pca.py +94 -124
  79. snowflake/ml/modeling/decomposition/sparse_pca.py +94 -124
  80. snowflake/ml/modeling/decomposition/truncated_svd.py +94 -124
  81. snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +96 -124
  82. snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +91 -119
  83. snowflake/ml/modeling/ensemble/ada_boost_classifier.py +96 -124
  84. snowflake/ml/modeling/ensemble/ada_boost_regressor.py +96 -124
  85. snowflake/ml/modeling/ensemble/bagging_classifier.py +96 -124
  86. snowflake/ml/modeling/ensemble/bagging_regressor.py +96 -124
  87. snowflake/ml/modeling/ensemble/extra_trees_classifier.py +96 -124
  88. snowflake/ml/modeling/ensemble/extra_trees_regressor.py +96 -124
  89. snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +96 -124
  90. snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +96 -124
  91. snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +96 -124
  92. snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +96 -124
  93. snowflake/ml/modeling/ensemble/isolation_forest.py +94 -124
  94. snowflake/ml/modeling/ensemble/random_forest_classifier.py +96 -124
  95. snowflake/ml/modeling/ensemble/random_forest_regressor.py +96 -124
  96. snowflake/ml/modeling/ensemble/stacking_regressor.py +96 -124
  97. snowflake/ml/modeling/ensemble/voting_classifier.py +96 -124
  98. snowflake/ml/modeling/ensemble/voting_regressor.py +91 -119
  99. snowflake/ml/modeling/feature_selection/generic_univariate_select.py +82 -110
  100. snowflake/ml/modeling/feature_selection/select_fdr.py +80 -108
  101. snowflake/ml/modeling/feature_selection/select_fpr.py +80 -108
  102. snowflake/ml/modeling/feature_selection/select_fwe.py +80 -108
  103. snowflake/ml/modeling/feature_selection/select_k_best.py +81 -109
  104. snowflake/ml/modeling/feature_selection/select_percentile.py +80 -108
  105. snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +94 -124
  106. snowflake/ml/modeling/feature_selection/variance_threshold.py +76 -106
  107. snowflake/ml/modeling/framework/base.py +2 -2
  108. snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +96 -124
  109. snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +96 -124
  110. snowflake/ml/modeling/impute/iterative_imputer.py +94 -124
  111. snowflake/ml/modeling/impute/knn_imputer.py +94 -124
  112. snowflake/ml/modeling/impute/missing_indicator.py +94 -124
  113. snowflake/ml/modeling/impute/simple_imputer.py +1 -1
  114. snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +77 -107
  115. snowflake/ml/modeling/kernel_approximation/nystroem.py +94 -124
  116. snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +94 -124
  117. snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +86 -116
  118. snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +84 -114
  119. snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +96 -124
  120. snowflake/ml/modeling/lightgbm/lgbm_classifier.py +71 -100
  121. snowflake/ml/modeling/lightgbm/lgbm_regressor.py +71 -100
  122. snowflake/ml/modeling/linear_model/ard_regression.py +96 -124
  123. snowflake/ml/modeling/linear_model/bayesian_ridge.py +96 -124
  124. snowflake/ml/modeling/linear_model/elastic_net.py +96 -124
  125. snowflake/ml/modeling/linear_model/elastic_net_cv.py +96 -124
  126. snowflake/ml/modeling/linear_model/gamma_regressor.py +96 -124
  127. snowflake/ml/modeling/linear_model/huber_regressor.py +96 -124
  128. snowflake/ml/modeling/linear_model/lars.py +96 -124
  129. snowflake/ml/modeling/linear_model/lars_cv.py +96 -124
  130. snowflake/ml/modeling/linear_model/lasso.py +96 -124
  131. snowflake/ml/modeling/linear_model/lasso_cv.py +96 -124
  132. snowflake/ml/modeling/linear_model/lasso_lars.py +96 -124
  133. snowflake/ml/modeling/linear_model/lasso_lars_cv.py +96 -124
  134. snowflake/ml/modeling/linear_model/lasso_lars_ic.py +96 -124
  135. snowflake/ml/modeling/linear_model/linear_regression.py +91 -119
  136. snowflake/ml/modeling/linear_model/logistic_regression.py +96 -124
  137. snowflake/ml/modeling/linear_model/logistic_regression_cv.py +96 -124
  138. snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +96 -124
  139. snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +96 -124
  140. snowflake/ml/modeling/linear_model/multi_task_lasso.py +96 -124
  141. snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +96 -124
  142. snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +96 -124
  143. snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +96 -124
  144. snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +95 -124
  145. snowflake/ml/modeling/linear_model/perceptron.py +95 -124
  146. snowflake/ml/modeling/linear_model/poisson_regressor.py +96 -124
  147. snowflake/ml/modeling/linear_model/ransac_regressor.py +96 -124
  148. snowflake/ml/modeling/linear_model/ridge.py +96 -124
  149. snowflake/ml/modeling/linear_model/ridge_classifier.py +96 -124
  150. snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +96 -124
  151. snowflake/ml/modeling/linear_model/ridge_cv.py +96 -124
  152. snowflake/ml/modeling/linear_model/sgd_classifier.py +96 -124
  153. snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +94 -124
  154. snowflake/ml/modeling/linear_model/sgd_regressor.py +96 -124
  155. snowflake/ml/modeling/linear_model/theil_sen_regressor.py +96 -124
  156. snowflake/ml/modeling/linear_model/tweedie_regressor.py +96 -124
  157. snowflake/ml/modeling/manifold/isomap.py +94 -124
  158. snowflake/ml/modeling/manifold/mds.py +94 -124
  159. snowflake/ml/modeling/manifold/spectral_embedding.py +94 -124
  160. snowflake/ml/modeling/manifold/tsne.py +94 -124
  161. snowflake/ml/modeling/metrics/classification.py +187 -52
  162. snowflake/ml/modeling/metrics/correlation.py +4 -2
  163. snowflake/ml/modeling/metrics/covariance.py +7 -4
  164. snowflake/ml/modeling/metrics/ranking.py +32 -16
  165. snowflake/ml/modeling/metrics/regression.py +60 -32
  166. snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +94 -124
  167. snowflake/ml/modeling/mixture/gaussian_mixture.py +94 -124
  168. snowflake/ml/modeling/model_selection/grid_search_cv.py +88 -138
  169. snowflake/ml/modeling/model_selection/randomized_search_cv.py +90 -144
  170. snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +86 -114
  171. snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +93 -121
  172. snowflake/ml/modeling/multiclass/output_code_classifier.py +94 -122
  173. snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +92 -120
  174. snowflake/ml/modeling/naive_bayes/categorical_nb.py +96 -124
  175. snowflake/ml/modeling/naive_bayes/complement_nb.py +92 -120
  176. snowflake/ml/modeling/naive_bayes/gaussian_nb.py +79 -107
  177. snowflake/ml/modeling/naive_bayes/multinomial_nb.py +88 -116
  178. snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +96 -124
  179. snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +96 -124
  180. snowflake/ml/modeling/neighbors/kernel_density.py +94 -124
  181. snowflake/ml/modeling/neighbors/local_outlier_factor.py +94 -124
  182. snowflake/ml/modeling/neighbors/nearest_centroid.py +89 -117
  183. snowflake/ml/modeling/neighbors/nearest_neighbors.py +94 -124
  184. snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +96 -124
  185. snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +96 -124
  186. snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +96 -124
  187. snowflake/ml/modeling/neural_network/bernoulli_rbm.py +94 -124
  188. snowflake/ml/modeling/neural_network/mlp_classifier.py +96 -124
  189. snowflake/ml/modeling/neural_network/mlp_regressor.py +96 -124
  190. snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
  191. snowflake/ml/modeling/preprocessing/binarizer.py +14 -9
  192. snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +0 -4
  193. snowflake/ml/modeling/preprocessing/label_encoder.py +21 -13
  194. snowflake/ml/modeling/preprocessing/max_abs_scaler.py +20 -14
  195. snowflake/ml/modeling/preprocessing/min_max_scaler.py +35 -19
  196. snowflake/ml/modeling/preprocessing/normalizer.py +6 -9
  197. snowflake/ml/modeling/preprocessing/one_hot_encoder.py +20 -13
  198. snowflake/ml/modeling/preprocessing/ordinal_encoder.py +25 -13
  199. snowflake/ml/modeling/preprocessing/polynomial_features.py +94 -124
  200. snowflake/ml/modeling/preprocessing/robust_scaler.py +28 -14
  201. snowflake/ml/modeling/preprocessing/standard_scaler.py +25 -13
  202. snowflake/ml/modeling/semi_supervised/label_propagation.py +96 -124
  203. snowflake/ml/modeling/semi_supervised/label_spreading.py +96 -124
  204. snowflake/ml/modeling/svm/linear_svc.py +96 -124
  205. snowflake/ml/modeling/svm/linear_svr.py +96 -124
  206. snowflake/ml/modeling/svm/nu_svc.py +96 -124
  207. snowflake/ml/modeling/svm/nu_svr.py +96 -124
  208. snowflake/ml/modeling/svm/svc.py +96 -124
  209. snowflake/ml/modeling/svm/svr.py +96 -124
  210. snowflake/ml/modeling/tree/decision_tree_classifier.py +96 -124
  211. snowflake/ml/modeling/tree/decision_tree_regressor.py +96 -124
  212. snowflake/ml/modeling/tree/extra_tree_classifier.py +96 -124
  213. snowflake/ml/modeling/tree/extra_tree_regressor.py +96 -124
  214. snowflake/ml/modeling/xgboost/xgb_classifier.py +96 -125
  215. snowflake/ml/modeling/xgboost/xgb_regressor.py +96 -125
  216. snowflake/ml/modeling/xgboost/xgbrf_classifier.py +96 -125
  217. snowflake/ml/modeling/xgboost/xgbrf_regressor.py +96 -125
  218. snowflake/ml/registry/model_registry.py +2 -0
  219. snowflake/ml/registry/registry.py +215 -0
  220. snowflake/ml/version.py +1 -1
  221. {snowflake_ml_python-1.1.1.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +21 -3
  222. snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
  223. snowflake_ml_python-1.1.1.dist-info/RECORD +0 -331
  224. {snowflake_ml_python-1.1.1.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
snowflake/ml/modeling/neural_network/mlp_regressor.py
@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
  from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
  from snowflake.ml._internal import telemetry
  from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+ from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
  from snowflake.ml._internal.utils import pkg_version_utils, identifier
- from snowflake.snowpark import DataFrame
+ from snowflake.snowpark import DataFrame, Session
  from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
  from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+ from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+ from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
  from snowflake.ml.modeling._internal.estimator_utils import (
      gather_dependencies,
      original_estimator_has_callable,
      transform_snowml_obj_to_sklearn_obj,
      validate_sklearn_args,
  )
- from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
  from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers

  from snowflake.ml.model.model_signature import (
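The new ModelTrainer import points at snowflake/ml/modeling/_internal/model_trainer.py, a 13-line module added in this release (file 42 in the list above). That module is not shown in this diff, so the following is only a plausible sketch of such an interface; the member name is an assumption:

    # Hypothetical sketch only; the real module is not shown in this diff,
    # so the protocol member below is an assumption.
    from typing import Any, Protocol

    class ModelTrainer(Protocol):
        """Anything that can fit an estimator and hand back the fitted object."""

        def train(self) -> Any:
            ...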
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
  _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.neural_network".replace("sklearn.", "").split("_")])


-
  class MLPRegressor(BaseTransformer):
      r"""Multi-layer Perceptron regressor
      For more details on this class, see [sklearn.neural_network.MLPRegressor]
@@ -60,6 +61,51 @@ class MLPRegressor(BaseTransformer):

      Parameters
      ----------
+
+     input_cols: Optional[Union[str, List[str]]]
+         A string or list of strings representing column names that contain features.
+         If this parameter is not specified, all columns in the input DataFrame except
+         the columns specified by the label_cols, sample_weight_col, and passthrough_cols
+         parameters are considered input columns. Input columns can also be set after
+         initialization with the `set_input_cols` method.
+
+     label_cols: Optional[Union[str, List[str]]]
+         A string or list of strings representing column names that contain labels.
+         Label columns must be specified with this parameter during initialization
+         or with the `set_label_cols` method before fitting.
+
+     output_cols: Optional[Union[str, List[str]]]
+         A string or list of strings representing column names that will store the
+         output of predict and transform operations. The length of output_cols must
+         match the expected number of output columns from the specific predictor or
+         transformer class used.
+         If you omit this parameter, output column names are derived by adding an
+         OUTPUT_ prefix to the label column names for supervised estimators, or
+         OUTPUT_<IDX> for unsupervised estimators. These inferred output column names
+         work for predictors, but output_cols must be set explicitly for transformers.
+         In general, explicitly specifying output column names is clearer, especially
+         if you don’t specify the input column names.
+         To transform in place, pass the same names for input_cols and output_cols.
+         Output columns can also be set after
+         initialization with the `set_output_cols` method.
+
+     sample_weight_col: Optional[str]
+         A string representing the column name containing the sample weights.
+         This argument is only required when working with weighted datasets. The sample
+         weight column can also be set after initialization with the
+         `set_sample_weight_col` method.
+
+     passthrough_cols: Optional[Union[str, List[str]]]
+         A string or a list of strings indicating column names to be excluded from any
+         operations (such as train, transform, or inference). These specified column(s)
+         will remain untouched throughout the process. This option is helpful in scenarios
+         requiring automatic input_cols inference, but needing to avoid using specific
+         columns, like index columns, during training or inference. Passthrough columns
+         can also be set after initialization with the `set_passthrough_cols` method.
+
+     drop_input_cols: Optional[bool], default=False
+         If set, the response of the predict() and transform() methods will not contain input columns.
+
      hidden_layer_sizes: array-like of shape(n_layers - 2,), default=(100,)
          The ith element represents the number of neurons in the ith
          hidden layer.
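The relocated column-handling parameters are easiest to read with a concrete call. A minimal, hypothetical example follows; the data, column names, and max_iter value are invented, not taken from this diff:

    # Hypothetical usage of the column-handling parameters; data is invented.
    import pandas as pd
    from snowflake.ml.modeling.neural_network import MLPRegressor

    train_df = pd.DataFrame({
        "FEATURE_1": [0.1, 0.4, 0.9, 0.3],
        "FEATURE_2": [1.0, 2.0, 3.0, 1.5],
        "TARGET":    [0.5, 1.1, 2.2, 0.8],
    })
    regressor = MLPRegressor(
        input_cols=["FEATURE_1", "FEATURE_2"],  # feature columns
        label_cols=["TARGET"],                  # required for supervised estimators
        output_cols=["OUTPUT_TARGET"],          # where predict() writes results
        max_iter=100,
    )
    regressor.fit(train_df)
    predictions = regressor.predict(train_df)  # keeps inputs unless drop_input_cols=True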
@@ -205,42 +251,6 @@ class MLPRegressor(BaseTransformer):
          of iterations reaches max_iter, or this number of function calls.
          Note that number of function calls will be greater than or equal to
          the number of iterations for the MLPRegressor.
-
-     input_cols: Optional[Union[str, List[str]]]
-         A string or list of strings representing column names that contain features.
-         If this parameter is not specified, all columns in the input DataFrame except
-         the columns specified by label_cols, sample_weight_col, and passthrough_cols
-         parameters are considered input columns.
-
-     label_cols: Optional[Union[str, List[str]]]
-         A string or list of strings representing column names that contain labels.
-         This is a required param for estimators, as there is no way to infer these
-         columns. If this parameter is not specified, then object is fitted without
-         labels (like a transformer).
-
-     output_cols: Optional[Union[str, List[str]]]
-         A string or list of strings representing column names that will store the
-         output of predict and transform operations. The length of output_cols must
-         match the expected number of output columns from the specific estimator or
-         transformer class used.
-         If this parameter is not specified, output column names are derived by
-         adding an OUTPUT_ prefix to the label column names. These inferred output
-         column names work for estimator's predict() method, but output_cols must
-         be set explicitly for transformers.
-
-     sample_weight_col: Optional[str]
-         A string representing the column name containing the sample weights.
-         This argument is only required when working with weighted datasets.
-
-     passthrough_cols: Optional[Union[str, List[str]]]
-         A string or a list of strings indicating column names to be excluded from any
-         operations (such as train, transform, or inference). These specified column(s)
-         will remain untouched throughout the process. This option is helpful in scenarios
-         requiring automatic input_cols inference, but need to avoid using specific
-         columns, like index columns, during training or inference.
-
-     drop_input_cols: Optional[bool], default=False
-         If set, the response of predict(), transform() methods will not contain input columns.
      """

      def __init__( # type: ignore[no-untyped-def]
@@ -284,7 +294,7 @@ class MLPRegressor(BaseTransformer):
          self.set_passthrough_cols(passthrough_cols)
          self.set_drop_input_cols(drop_input_cols)
          self.set_sample_weight_col(sample_weight_col)
-         deps = set(SklearnWrapperProvider().dependencies)
+         deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])

          self._deps = list(deps)

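The effect of the replaced line is that each estimator now pins its own core dependencies from the versions installed in the user's environment, instead of asking a wrapper provider for them. At runtime the set evaluates to something like the following; the version numbers are illustrative only:

    # Illustrative only; the actual pins mirror whatever versions of
    # numpy, scikit-learn, and cloudpickle the user has installed.
    deps = {
        "numpy==1.24.3",
        "scikit-learn==1.3.0",
        "cloudpickle==2.0.0",
    }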
@@ -315,13 +325,14 @@ class MLPRegressor(BaseTransformer):
              args=init_args,
              klass=sklearn.neural_network.MLPRegressor
          )
-         self._sklearn_object = sklearn.neural_network.MLPRegressor(
+         self._sklearn_object: Any = sklearn.neural_network.MLPRegressor(
              **cleaned_up_init_args,
          )
          self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
          # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
          self._snowpark_cols: Optional[List[str]] = self.input_cols
-         self._handlers: FitPredictHandlers = HandlersImpl(class_name=MLPRegressor.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True, wrapper_provider=SklearnWrapperProvider())
+         self._handlers: FitPredictHandlers = HandlersImpl(class_name=MLPRegressor.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+         self._autogenerated = True

      def _get_rand_id(self) -> str:
          """
@@ -377,54 +388,48 @@ class MLPRegressor(BaseTransformer):
              self
          """
          self._infer_input_output_cols(dataset)
-         if isinstance(dataset, pd.DataFrame):
-             assert self._sklearn_object is not None # keep mypy happy
-             self._sklearn_object = self._handlers.fit_pandas(
-                 dataset,
-                 self._sklearn_object,
-                 self.input_cols,
-                 self.label_cols,
-                 self.sample_weight_col
-             )
-         elif isinstance(dataset, DataFrame):
-             self._fit_snowpark(dataset)
-         else:
-             raise TypeError(
-                 f"Unexpected dataset type: {type(dataset)}."
-                 "Supported dataset types: snowpark.DataFrame, pandas.DataFrame."
-             )
+         if isinstance(dataset, DataFrame):
+             session = dataset._session
+             assert session is not None # keep mypy happy
+             # Validate that key package version in user workspace are supported in snowflake conda channel
+             # If customer doesn't have package in conda channel, replace the ones have the closest versions
+             self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                 pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+             # Specify input columns so column pruning will be enforced
+             selected_cols = self._get_active_columns()
+             if len(selected_cols) > 0:
+                 dataset = dataset.select(selected_cols)
+
+             self._snowpark_cols = dataset.select(self.input_cols).columns
+
+             # If we are already in a stored procedure, no need to kick off another one.
+             if SNOWML_SPROC_ENV in os.environ:
+                 statement_params = telemetry.get_function_usage_statement_params(
+                     project=_PROJECT,
+                     subproject=_SUBPROJECT,
+                     function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), MLPRegressor.__class__.__name__),
+                     api_calls=[Session.call],
+                     custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+                 )
+                 pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+                 pd_df.columns = dataset.columns
+                 dataset = pd_df
+
+         model_trainer = ModelTrainerBuilder.build(
+             estimator=self._sklearn_object,
+             dataset=dataset,
+             input_cols=self.input_cols,
+             label_cols=self.label_cols,
+             sample_weight_col=self.sample_weight_col,
+             autogenerated=self._autogenerated,
+             subproject=_SUBPROJECT
+         )
+         self._sklearn_object = model_trainer.train()
          self._is_fitted = True
          self._get_model_signatures(dataset)
          return self

-     def _fit_snowpark(self, dataset: DataFrame) -> None:
-         session = dataset._session
-         assert session is not None # keep mypy happy
-         # Validate that key package version in user workspace are supported in snowflake conda channel
-         # If customer doesn't have package in conda channel, replace the ones have the closest versions
-         self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-             pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-         # Specify input columns so column pruning will be enforced
-         selected_cols = self._get_active_columns()
-         if len(selected_cols) > 0:
-             dataset = dataset.select(selected_cols)
-
-         estimator = self._sklearn_object
-         assert estimator is not None # Keep mypy happy
-
-         self._snowpark_cols = dataset.select(self.input_cols).columns
-
-         self._sklearn_object = self._handlers.fit_snowpark(
-             dataset,
-             session,
-             estimator,
-             ["snowflake-snowpark-python"] + self._get_dependencies(),
-             self.input_cols,
-             self.label_cols,
-             self.sample_weight_col,
-         )
-
      def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
          if self._drop_input_cols:
              return []
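The fit path above hands all training off to ModelTrainerBuilder (model_trainer_builder.py, +78 lines; pandas_trainer.py and snowpark_trainer.py carry the two implementations). The builder itself is not shown in this diff, so here is a plausible sketch of the dispatch; every name except ModelTrainerBuilder, build, train, and _ENABLE_DISTRIBUTED is an assumption:

    # Hypothetical sketch of the dispatch; real class and argument names may differ.
    from typing import Any, List, Optional

    import pandas as pd
    from snowflake.snowpark import DataFrame


    class PandasTrainer:
        """Stand-in for the local (in-memory) training path."""

        def __init__(self, estimator: Any, dataset: pd.DataFrame,
                     input_cols: List[str], label_cols: Optional[List[str]],
                     sample_weight_col: Optional[str]) -> None:
            self.estimator = estimator
            self.dataset = dataset
            self.input_cols = input_cols
            self.label_cols = label_cols
            self.sample_weight_col = sample_weight_col

        def train(self) -> Any:
            X = self.dataset[self.input_cols]
            y = self.dataset[self.label_cols].squeeze() if self.label_cols else None
            if self.sample_weight_col:
                return self.estimator.fit(X, y, sample_weight=self.dataset[self.sample_weight_col])
            return self.estimator.fit(X, y)


    class ModelTrainerBuilder:
        _ENABLE_DISTRIBUTED = True  # the flag that disable_distributed_hpo flips (see below)

        @classmethod
        def build(cls, estimator: Any, dataset: Any, input_cols: List[str],
                  label_cols: Optional[List[str]] = None,
                  sample_weight_col: Optional[str] = None,
                  autogenerated: bool = False, subproject: str = "") -> Any:
            if isinstance(dataset, pd.DataFrame):
                return PandasTrainer(estimator, dataset, input_cols, label_cols, sample_weight_col)
            if isinstance(dataset, DataFrame):
                raise NotImplementedError("Snowpark trainer omitted from this sketch")
            raise TypeError(f"Unexpected dataset type: {type(dataset)}.")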
@@ -612,11 +617,6 @@ class MLPRegressor(BaseTransformer):
          subproject=_SUBPROJECT,
          custom_tags=dict([("autogen", True)]),
      )
-     @telemetry.add_stmt_params_to_df(
-         project=_PROJECT,
-         subproject=_SUBPROJECT,
-         custom_tags=dict([("autogen", True)]),
-     )
      def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
          """Predict using the multi-layer perceptron model
          For more details on this function, see [sklearn.neural_network.MLPRegressor.predict]
@@ -670,11 +670,6 @@ class MLPRegressor(BaseTransformer):
          subproject=_SUBPROJECT,
          custom_tags=dict([("autogen", True)]),
      )
-     @telemetry.add_stmt_params_to_df(
-         project=_PROJECT,
-         subproject=_SUBPROJECT,
-         custom_tags=dict([("autogen", True)]),
-     )
      def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
          """Method not supported for this class.

@@ -731,7 +726,8 @@ class MLPRegressor(BaseTransformer):
          if False:
              self.fit(dataset)
              assert self._sklearn_object is not None
-             return self._sklearn_object.labels_
+             labels : npt.NDArray[Any] = self._sklearn_object.labels_
+             return labels
          else:
              raise NotImplementedError

@@ -767,6 +763,7 @@ class MLPRegressor(BaseTransformer):
              output_cols = []

          # Make sure column names are valid snowflake identifiers.
+         assert output_cols is not None # Make MyPy happy
          rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]

          return rv
@@ -777,11 +774,6 @@ class MLPRegressor(BaseTransformer):
          subproject=_SUBPROJECT,
          custom_tags=dict([("autogen", True)]),
      )
-     @telemetry.add_stmt_params_to_df(
-         project=_PROJECT,
-         subproject=_SUBPROJECT,
-         custom_tags=dict([("autogen", True)]),
-     )
      def predict_proba(
          self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
      ) -> Union[DataFrame, pd.DataFrame]:
@@ -822,11 +814,6 @@ class MLPRegressor(BaseTransformer):
          subproject=_SUBPROJECT,
          custom_tags=dict([("autogen", True)]),
      )
-     @telemetry.add_stmt_params_to_df(
-         project=_PROJECT,
-         subproject=_SUBPROJECT,
-         custom_tags=dict([("autogen", True)]),
-     )
      def predict_log_proba(
          self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
      ) -> Union[DataFrame, pd.DataFrame]:
@@ -863,16 +850,6 @@ class MLPRegressor(BaseTransformer):
          return output_df

      @available_if(original_estimator_has_callable("decision_function")) # type: ignore[misc]
-     @telemetry.send_api_usage_telemetry(
-         project=_PROJECT,
-         subproject=_SUBPROJECT,
-         custom_tags=dict([("autogen", True)]),
-     )
-     @telemetry.add_stmt_params_to_df(
-         project=_PROJECT,
-         subproject=_SUBPROJECT,
-         custom_tags=dict([("autogen", True)]),
-     )
      def decision_function(
          self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
      ) -> Union[DataFrame, pd.DataFrame]:
@@ -973,11 +950,6 @@ class MLPRegressor(BaseTransformer):
          subproject=_SUBPROJECT,
          custom_tags=dict([("autogen", True)]),
      )
-     @telemetry.add_stmt_params_to_df(
-         project=_PROJECT,
-         subproject=_SUBPROJECT,
-         custom_tags=dict([("autogen", True)]),
-     )
      def kneighbors(
          self,
          dataset: Union[DataFrame, pd.DataFrame],
@@ -1037,9 +1009,9 @@ class MLPRegressor(BaseTransformer):
          # For classifier, the type of predict is the same as the type of label
          if self._sklearn_object._estimator_type == 'classifier':
              # label columns is the desired type for output
-             outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+             outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
              # rename the output columns
-             outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+             outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
              self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                     ([] if self._drop_input_cols else inputs)
                                                                     + outputs)
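For context, the signature assembled here pairs typed input features with typed outputs. A hand-written illustration of the resulting structure, with invented column names and types:

    # Invented example of the structure assembled above.
    from snowflake.ml.model.model_signature import DataType, FeatureSpec, ModelSignature

    signature = ModelSignature(
        inputs=[FeatureSpec(name="FEATURE_1", dtype=DataType.DOUBLE)],
        outputs=[FeatureSpec(name="OUTPUT_TARGET", dtype=DataType.DOUBLE)],
    )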
snowflake/ml/modeling/parameters/disable_distributed_hpo.py
@@ -1,8 +1,4 @@
  """Disables the distributed implementation of Grid Search and Randomized Search CV"""
- from snowflake.ml.modeling.model_selection.grid_search_cv import GridSearchCV
- from snowflake.ml.modeling.model_selection.randomized_search_cv import (
-     RandomizedSearchCV,
- )
+ from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder

- GridSearchCV._ENABLE_DISTRIBUTED = False
- RandomizedSearchCV._ENABLE_DISTRIBUTED = False
+ ModelTrainerBuilder._ENABLE_DISTRIBUTED = False
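The module keeps its import-side-effect design: importing it flips a single flag, now on the shared ModelTrainerBuilder rather than on each search class. Usage stays the same; this sketch assumes the GridSearchCV re-export path:

    # Importing the module is the whole API; the import side effect
    # disables distributed hyperparameter search for the session.
    import snowflake.ml.modeling.parameters.disable_distributed_hpo  # noqa: F401

    from snowflake.ml.modeling.model_selection import GridSearchCV
    # Subsequent GridSearchCV.fit(...) calls take the non-distributed path.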
snowflake/ml/modeling/preprocessing/binarizer.py
@@ -21,16 +21,25 @@ class Binarizer(base.BaseTransformer):
      (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Binarizer.html).

      Args:
-         threshold: Feature values below or equal to this are replaced by 0, above it by 1. Default values is 0.0.
-         input_cols: The name(s) of one or more columns in a DataFrame containing a feature to be binarized.
-         output_cols: The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
+         threshold: float, default=0.0
+             Feature values below or equal to this are replaced by 0, above it by 1. Default value is 0.0.
+
+         input_cols: Optional[Union[str, Iterable[str]]], default=None
+             The name(s) of one or more columns in a DataFrame containing a feature to be binarized.
+
+         output_cols: Optional[Union[str, Iterable[str]]], default=None
+             The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
              columns specified must match the number of input columns.
-         passthrough_cols: A string or a list of strings indicating column names to be excluded from any
+
+         passthrough_cols: Optional[Union[str, Iterable[str]]], default=None
+             A string or a list of strings indicating column names to be excluded from any
              operations (such as train, transform, or inference). These specified column(s)
              will remain untouched throughout the process. This option is helpful in scenarios
              requiring automatic input_cols inference, but needing to avoid using specific
              columns, like index columns, during training or inference.
-         drop_input_cols: Remove input columns from output if set True. False by default.
+
+         drop_input_cols: Optional[bool], default=False
+             Remove input columns from output if set True. False by default.
      """

      def __init__(
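A minimal, hypothetical round trip with the documented parameters (data and column names invented):

    # Hypothetical usage; column names and data are invented.
    import pandas as pd
    from snowflake.ml.modeling.preprocessing import Binarizer

    df = pd.DataFrame({"SCORE": [0.2, 0.5, 0.9]})
    binarizer = Binarizer(threshold=0.5, input_cols=["SCORE"], output_cols=["SCORE_BIN"])
    result = binarizer.fit(df).transform(df)  # SCORE_BIN: 1 where SCORE > 0.5, else 0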
@@ -108,10 +117,6 @@ class Binarizer(base.BaseTransformer):
          project=base.PROJECT,
          subproject=base.SUBPROJECT,
      )
-     @telemetry.add_stmt_params_to_df(
-         project=base.PROJECT,
-         subproject=base.SUBPROJECT,
-     )
      def transform(self, dataset: Union[snowpark.DataFrame, pd.DataFrame]) -> Union[snowpark.DataFrame, pd.DataFrame]:
          """
          Binarize the data. Map to 1 if it is strictly greater than the threshold, otherwise 0.
snowflake/ml/modeling/preprocessing/k_bins_discretizer.py
@@ -177,10 +177,6 @@ class KBinsDiscretizer(base.BaseTransformer):
          project=base.PROJECT,
          subproject=base.SUBPROJECT,
      )
-     @telemetry.add_stmt_params_to_df(
-         project=base.PROJECT,
-         subproject=base.SUBPROJECT,
-     )
      def transform(
          self, dataset: Union[snowpark.DataFrame, pd.DataFrame]
      ) -> Union[snowpark.DataFrame, pd.DataFrame, sparse.csr_matrix]:
snowflake/ml/modeling/preprocessing/label_encoder.py
@@ -24,15 +24,22 @@ class LabelEncoder(base.BaseTransformer):
      (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html).

      Args:
-         input_cols: The name of a column in a DataFrame to be encoded. May be a string or a list containing one string.
-         output_cols: The name of a column in a DataFrame where the results will be stored. May be a string or a list
+         input_cols: Optional[Union[str, List[str]]]
+             The name of a column in a DataFrame to be encoded. May be a string or a list containing one string.
+
+         output_cols: Optional[Union[str, List[str]]]
+             The name of a column in a DataFrame where the results will be stored. May be a string or a list
              containing one string.
-         passthrough_cols: A string or a list of strings indicating column names to be excluded from any
+
+         passthrough_cols: Optional[Union[str, List[str]]]
+             A string or a list of strings indicating column names to be excluded from any
              operations (such as train, transform, or inference). These specified column(s)
              will remain untouched throughout the process. This option is helpful in scenarios
              requiring automatic input_cols inference, but needing to avoid using specific
              columns, like index columns, during training or inference.
-         drop_input_cols: Remove input columns from output if set True. False by default.
+
+         drop_input_cols: Optional[bool], default=False
+             Remove input columns from output if set True. False by default.
      """

      def __init__(
@@ -46,19 +53,24 @@ class LabelEncoder(base.BaseTransformer):
          Encode target labels with integers between 0 and n_classes-1.

          Args:
-             input_cols: The name of a column in a DataFrame to be encoded. May be a string or a list containing one
+             input_cols: Optional[Union[str, List[str]]]
+                 The name of a column in a DataFrame to be encoded. May be a string or a list containing one
                  string.
-             output_cols: The name of a column in a DataFrame where the results will be stored. May be a string or a list
+             output_cols: Optional[Union[str, List[str]]]
+                 The name of a column in a DataFrame where the results will be stored. May be a string or a list
                  containing one string.
-             passthrough_cols: A string or a list of strings indicating column names to be excluded from any
+             passthrough_cols: Optional[Union[str, List[str]]]
+                 A string or a list of strings indicating column names to be excluded from any
                  operations (such as train, transform, or inference). These specified column(s)
                  will remain untouched throughout the process. This option is helpful in scenarios
                  requiring automatic input_cols inference, but needing to avoid using specific
                  columns, like index columns, during training or inference.
-             drop_input_cols: Remove input columns from output if set True. False by default.
+             drop_input_cols: Optional[bool], default=False
+                 Remove input columns from output if set True. False by default.

          Attributes:
-             classes_: A np.ndarray that holds the label for each class.
+             classes_: Optional[type_utils.LiteralNDArrayType]
+                 A np.ndarray that holds the label for each class.
                  Attributes are valid only after fit() has been called.

          """
@@ -126,10 +138,6 @@ class LabelEncoder(base.BaseTransformer):
          project=base.PROJECT,
          subproject=base.SUBPROJECT,
      )
-     @telemetry.add_stmt_params_to_df(
-         project=base.PROJECT,
-         subproject=base.SUBPROJECT,
-     )
      def transform(self, dataset: Union[snowpark.DataFrame, pd.DataFrame]) -> Union[snowpark.DataFrame, pd.DataFrame]:
          """
          Use fit result to transform snowpark dataframe or pandas dataframe. The original dataset with
snowflake/ml/modeling/preprocessing/max_abs_scaler.py
@@ -27,19 +27,29 @@ class MaxAbsScaler(base.BaseTransformer):
      (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MaxAbsScaler.html).

      Args:
-         input_cols: The name(s) of one or more columns in a DataFrame containing a feature to be scaled.
-         output_cols: The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
+         input_cols: Optional[Union[str, List[str]]], default=None
+             The name(s) of one or more columns in a DataFrame containing a feature to be scaled.
+
+         output_cols: Optional[Union[str, List[str]]], default=None
+             The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
              columns specified must match the number of input columns.
-         passthrough_cols: A string or a list of strings indicating column names to be excluded from any
-             operations (such as train, transform, or inference). These specified column(s)
-             will remain untouched throughout the process. This option is helpful in scenarios
-             requiring automatic input_cols inference, but need to avoid using specific
-             columns, like index columns, during training or inference.
-         drop_input_cols: Remove input columns from output if set True. False by default.
+
+         passthrough_cols: Optional[Union[str, List[str]]], default=None
+             A string or a list of strings indicating column names to be excluded from any
+             operations (such as train, transform, or inference). These specified column(s)
+             will remain untouched throughout the process. This option is helpful in scenarios
+             requiring automatic input_cols inference, but needing to avoid using specific
+             columns, like index columns, during training or inference.
+
+         drop_input_cols: Optional[bool], default=False
+             Remove input columns from output if set True. False by default.

      Attributes:
-         scale_: dict {column_name: value} or None. Per-feature relative scaling factor.
-         max_abs_: dict {column_name: value} or None. Per-feature maximum absolute value.
+         scale_: Dict[str, float]
+             dict {column_name: value} or None. Per-feature relative scaling factor.
+
+         max_abs_: Dict[str, float]
+             dict {column_name: value} or None. Per-feature maximum absolute value.
      """

      def __init__(
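The retyped attributes are per-column dictionaries, which a short hypothetical example makes concrete (data invented):

    # Hypothetical usage; data is invented.
    import pandas as pd
    from snowflake.ml.modeling.preprocessing import MaxAbsScaler

    df = pd.DataFrame({"AMOUNT": [-2.0, 1.0, 4.0]})
    scaler = MaxAbsScaler(input_cols=["AMOUNT"], output_cols=["AMOUNT_SCALED"])
    scaler.fit(df)
    print(scaler.max_abs_)  # e.g. {"AMOUNT": 4.0}, keyed by input column
    print(scaler.scale_)    # per-column factor derived from max_abs_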
@@ -150,10 +160,6 @@ class MaxAbsScaler(base.BaseTransformer):
          project=base.PROJECT,
          subproject=base.SUBPROJECT,
      )
-     @telemetry.add_stmt_params_to_df(
-         project=base.PROJECT,
-         subproject=base.SUBPROJECT,
-     )
      def transform(self, dataset: Union[snowpark.DataFrame, pd.DataFrame]) -> Union[snowpark.DataFrame, pd.DataFrame]:
          """
          Scale the data.
snowflake/ml/modeling/preprocessing/min_max_scaler.py
@@ -21,25 +21,45 @@ class MinMaxScaler(base.BaseTransformer):
      (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html).

      Args:
-         feature_range: Desired range of transformed data (default is 0 to 1).
-         clip: Whether to clip transformed values of held-out data to the specified feature range (default is True).
-         input_cols: The name(s) of one or more columns in a DataFrame containing a feature to be scaled. Each specified
+         feature_range: Tuple[float, float], default=(0, 1)
+             Desired range of transformed data (default is 0 to 1).
+
+         clip: bool, default=False
+             Whether to clip transformed values of held-out data to the specified feature range (default is True).
+
+         input_cols: Optional[Union[str, List[str]]], default=None
+             The name(s) of one or more columns in a DataFrame containing a feature to be scaled. Each specified
              input column is scaled independently and stored in the corresponding output column.
-         output_cols: The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
+
+         output_cols: Optional[Union[str, List[str]]], default=None
+             The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
              columns specified must match the number of input columns.
-         passthrough_cols: A string or a list of strings indicating column names to be excluded from any
-             operations (such as train, transform, or inference). These specified column(s)
-             will remain untouched throughout the process. This option is helpful in scenarios
-             requiring automatic input_cols inference, but need to avoid using specific
-             columns, like index columns, during training or inference.
-         drop_input_cols: Remove input columns from output if set True. False by default.
+
+         passthrough_cols: Optional[Union[str, List[str]]], default=None
+             A string or a list of strings indicating column names to be excluded from any
+             operations (such as train, transform, or inference). These specified column(s)
+             will remain untouched throughout the process. This option is helpful in scenarios
+             requiring automatic input_cols inference, but needing to avoid using specific
+             columns, like index columns, during training or inference.
+
+         drop_input_cols: Optional[bool], default=False
+             Remove input columns from output if set True. False by default.

      Attributes:
-         min_: dict {column_name: value} or None. Per-feature adjustment for minimum.
-         scale_: dict {column_name: value} or None. Per-feature relative scaling factor.
-         data_min_: dict {column_name: value} or None. Per-feature minimum seen in the data.
-         data_max_: dict {column_name: value} or None. Per-feature maximum seen in the data.
-         data_range_: dict {column_name: value} or None. Per-feature range seen in the data as a (min, max) tuple.
+         min_: Dict[str, float]
+             dict {column_name: value} or None. Per-feature adjustment for minimum.
+
+         scale_: Dict[str, float]
+             dict {column_name: value} or None. Per-feature relative scaling factor.
+
+         data_min_: Dict[str, float]
+             dict {column_name: value} or None. Per-feature minimum seen in the data.
+
+         data_max_: Dict[str, float]
+             dict {column_name: value} or None. Per-feature maximum seen in the data.
+
+         data_range_: Dict[str, float]
+             dict {column_name: value} or None. Per-feature range seen in the data as a (min, max) tuple.
      """

      def __init__(
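As with MaxAbsScaler, the fitted attributes are per-column dictionaries. A hypothetical example covering feature_range and clip as well (data invented):

    # Hypothetical usage; data is invented.
    import pandas as pd
    from snowflake.ml.modeling.preprocessing import MinMaxScaler

    df = pd.DataFrame({"AGE": [18.0, 30.0, 60.0]})
    scaler = MinMaxScaler(
        feature_range=(0, 1),
        clip=True,                 # clip held-out values into the range
        input_cols=["AGE"],
        output_cols=["AGE_SCALED"],
    )
    scaled = scaler.fit(df).transform(df)
    print(scaler.data_min_, scaler.data_max_)  # e.g. {'AGE': 18.0} {'AGE': 60.0}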
@@ -170,10 +190,6 @@ class MinMaxScaler(base.BaseTransformer):
          project=base.PROJECT,
          subproject=base.SUBPROJECT,
      )
-     @telemetry.add_stmt_params_to_df(
-         project=base.PROJECT,
-         subproject=base.SUBPROJECT,
-     )
      def transform(self, dataset: Union[snowpark.DataFrame, pd.DataFrame]) -> Union[snowpark.DataFrame, pd.DataFrame]:
          """
          Scale features according to feature_range.
snowflake/ml/modeling/preprocessing/normalizer.py
@@ -34,11 +34,12 @@ class Normalizer(base.BaseTransformer):
              A string or list of strings representing column names that will store the output of transform operation.
              The length of `output_cols` must equal the length of `input_cols`.

-         passthrough_cols: A string or a list of strings indicating column names to be excluded from any
-             operations (such as train, transform, or inference). These specified column(s)
-             will remain untouched throughout the process. This option is helpful in scenarios
-             requiring automatic input_cols inference, but need to avoid using specific
-             columns, like index columns, during training or inference.
+         passthrough_cols: Optional[Union[str, List[str]]]
+             A string or a list of strings indicating column names to be excluded from any
+             operations (such as train, transform, or inference). These specified column(s)
+             will remain untouched throughout the process. This option is helpful in scenarios
+             requiring automatic input_cols inference, but needing to avoid using specific
+             columns, like index columns, during training or inference.

          drop_input_cols: bool, default=False
              Remove input columns from output if set `True`.
@@ -90,10 +91,6 @@ class Normalizer(base.BaseTransformer):
          project=base.PROJECT,
          subproject=base.SUBPROJECT,
      )
-     @telemetry.add_stmt_params_to_df(
-         project=base.PROJECT,
-         subproject=base.SUBPROJECT,
-     )
      def transform(self, dataset: Union[snowpark.DataFrame, pd.DataFrame]) -> Union[snowpark.DataFrame, pd.DataFrame]:
          """
          Scale each non-zero row of the input dataset to the unit norm.