snowflake-ml-python 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (225)
  1. snowflake/cortex/_complete.py +1 -1
  2. snowflake/cortex/_extract_answer.py +1 -1
  3. snowflake/cortex/_sentiment.py +1 -1
  4. snowflake/cortex/_summarize.py +1 -1
  5. snowflake/cortex/_translate.py +1 -1
  6. snowflake/ml/_internal/env_utils.py +68 -6
  7. snowflake/ml/_internal/file_utils.py +34 -4
  8. snowflake/ml/_internal/telemetry.py +79 -91
  9. snowflake/ml/_internal/utils/identifier.py +78 -72
  10. snowflake/ml/_internal/utils/retryable_http.py +16 -4
  11. snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
  12. snowflake/ml/dataset/dataset.py +1 -1
  13. snowflake/ml/model/_api.py +21 -14
  14. snowflake/ml/model/_client/model/model_impl.py +176 -0
  15. snowflake/ml/model/_client/model/model_method_info.py +19 -0
  16. snowflake/ml/model/_client/model/model_version_impl.py +291 -0
  17. snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
  18. snowflake/ml/model/_client/ops/model_ops.py +308 -0
  19. snowflake/ml/model/_client/sql/model.py +75 -0
  20. snowflake/ml/model/_client/sql/model_version.py +213 -0
  21. snowflake/ml/model/_client/sql/stage.py +40 -0
  22. snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
  23. snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
  24. snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
  25. snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
  26. snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
  27. snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
  28. snowflake/ml/model/_model_composer/model_composer.py +31 -9
  29. snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
  30. snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
  31. snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
  32. snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
  33. snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
  34. snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
  35. snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
  36. snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
  37. snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
  38. snowflake/ml/model/model_signature.py +108 -53
  39. snowflake/ml/model/type_hints.py +1 -0
  40. snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
  41. snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
  42. snowflake/ml/modeling/_internal/model_specifications.py +146 -0
  43. snowflake/ml/modeling/_internal/model_trainer.py +13 -0
  44. snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
  45. snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
  46. snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
  47. snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
  48. snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +108 -135
  49. snowflake/ml/modeling/cluster/affinity_propagation.py +106 -135
  50. snowflake/ml/modeling/cluster/agglomerative_clustering.py +106 -135
  51. snowflake/ml/modeling/cluster/birch.py +106 -135
  52. snowflake/ml/modeling/cluster/bisecting_k_means.py +106 -135
  53. snowflake/ml/modeling/cluster/dbscan.py +106 -135
  54. snowflake/ml/modeling/cluster/feature_agglomeration.py +106 -135
  55. snowflake/ml/modeling/cluster/k_means.py +105 -135
  56. snowflake/ml/modeling/cluster/mean_shift.py +106 -135
  57. snowflake/ml/modeling/cluster/mini_batch_k_means.py +105 -135
  58. snowflake/ml/modeling/cluster/optics.py +106 -135
  59. snowflake/ml/modeling/cluster/spectral_biclustering.py +106 -135
  60. snowflake/ml/modeling/cluster/spectral_clustering.py +106 -135
  61. snowflake/ml/modeling/cluster/spectral_coclustering.py +106 -135
  62. snowflake/ml/modeling/compose/column_transformer.py +106 -135
  63. snowflake/ml/modeling/compose/transformed_target_regressor.py +108 -135
  64. snowflake/ml/modeling/covariance/elliptic_envelope.py +106 -135
  65. snowflake/ml/modeling/covariance/empirical_covariance.py +99 -128
  66. snowflake/ml/modeling/covariance/graphical_lasso.py +106 -135
  67. snowflake/ml/modeling/covariance/graphical_lasso_cv.py +106 -135
  68. snowflake/ml/modeling/covariance/ledoit_wolf.py +104 -133
  69. snowflake/ml/modeling/covariance/min_cov_det.py +106 -135
  70. snowflake/ml/modeling/covariance/oas.py +99 -128
  71. snowflake/ml/modeling/covariance/shrunk_covariance.py +103 -132
  72. snowflake/ml/modeling/decomposition/dictionary_learning.py +106 -135
  73. snowflake/ml/modeling/decomposition/factor_analysis.py +106 -135
  74. snowflake/ml/modeling/decomposition/fast_ica.py +106 -135
  75. snowflake/ml/modeling/decomposition/incremental_pca.py +106 -135
  76. snowflake/ml/modeling/decomposition/kernel_pca.py +106 -135
  77. snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +106 -135
  78. snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +106 -135
  79. snowflake/ml/modeling/decomposition/pca.py +106 -135
  80. snowflake/ml/modeling/decomposition/sparse_pca.py +106 -135
  81. snowflake/ml/modeling/decomposition/truncated_svd.py +106 -135
  82. snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +108 -135
  83. snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +108 -135
  84. snowflake/ml/modeling/ensemble/ada_boost_classifier.py +108 -135
  85. snowflake/ml/modeling/ensemble/ada_boost_regressor.py +108 -135
  86. snowflake/ml/modeling/ensemble/bagging_classifier.py +108 -135
  87. snowflake/ml/modeling/ensemble/bagging_regressor.py +108 -135
  88. snowflake/ml/modeling/ensemble/extra_trees_classifier.py +108 -135
  89. snowflake/ml/modeling/ensemble/extra_trees_regressor.py +108 -135
  90. snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +108 -135
  91. snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +108 -135
  92. snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +108 -135
  93. snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +108 -135
  94. snowflake/ml/modeling/ensemble/isolation_forest.py +106 -135
  95. snowflake/ml/modeling/ensemble/random_forest_classifier.py +108 -135
  96. snowflake/ml/modeling/ensemble/random_forest_regressor.py +108 -135
  97. snowflake/ml/modeling/ensemble/stacking_regressor.py +108 -135
  98. snowflake/ml/modeling/ensemble/voting_classifier.py +108 -135
  99. snowflake/ml/modeling/ensemble/voting_regressor.py +108 -135
  100. snowflake/ml/modeling/feature_selection/generic_univariate_select.py +101 -128
  101. snowflake/ml/modeling/feature_selection/select_fdr.py +99 -126
  102. snowflake/ml/modeling/feature_selection/select_fpr.py +99 -126
  103. snowflake/ml/modeling/feature_selection/select_fwe.py +99 -126
  104. snowflake/ml/modeling/feature_selection/select_k_best.py +100 -127
  105. snowflake/ml/modeling/feature_selection/select_percentile.py +99 -126
  106. snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +106 -135
  107. snowflake/ml/modeling/feature_selection/variance_threshold.py +95 -124
  108. snowflake/ml/modeling/framework/base.py +83 -1
  109. snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +108 -135
  110. snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +108 -135
  111. snowflake/ml/modeling/impute/iterative_imputer.py +106 -135
  112. snowflake/ml/modeling/impute/knn_imputer.py +106 -135
  113. snowflake/ml/modeling/impute/missing_indicator.py +106 -135
  114. snowflake/ml/modeling/impute/simple_imputer.py +9 -1
  115. snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +96 -125
  116. snowflake/ml/modeling/kernel_approximation/nystroem.py +106 -135
  117. snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +106 -135
  118. snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +105 -134
  119. snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +103 -132
  120. snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +108 -135
  121. snowflake/ml/modeling/lightgbm/lgbm_classifier.py +90 -118
  122. snowflake/ml/modeling/lightgbm/lgbm_regressor.py +90 -118
  123. snowflake/ml/modeling/linear_model/ard_regression.py +108 -135
  124. snowflake/ml/modeling/linear_model/bayesian_ridge.py +108 -135
  125. snowflake/ml/modeling/linear_model/elastic_net.py +108 -135
  126. snowflake/ml/modeling/linear_model/elastic_net_cv.py +108 -135
  127. snowflake/ml/modeling/linear_model/gamma_regressor.py +108 -135
  128. snowflake/ml/modeling/linear_model/huber_regressor.py +108 -135
  129. snowflake/ml/modeling/linear_model/lars.py +108 -135
  130. snowflake/ml/modeling/linear_model/lars_cv.py +108 -135
  131. snowflake/ml/modeling/linear_model/lasso.py +108 -135
  132. snowflake/ml/modeling/linear_model/lasso_cv.py +108 -135
  133. snowflake/ml/modeling/linear_model/lasso_lars.py +108 -135
  134. snowflake/ml/modeling/linear_model/lasso_lars_cv.py +108 -135
  135. snowflake/ml/modeling/linear_model/lasso_lars_ic.py +108 -135
  136. snowflake/ml/modeling/linear_model/linear_regression.py +108 -135
  137. snowflake/ml/modeling/linear_model/logistic_regression.py +108 -135
  138. snowflake/ml/modeling/linear_model/logistic_regression_cv.py +108 -135
  139. snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +108 -135
  140. snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +108 -135
  141. snowflake/ml/modeling/linear_model/multi_task_lasso.py +108 -135
  142. snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +108 -135
  143. snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +108 -135
  144. snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +108 -135
  145. snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +107 -135
  146. snowflake/ml/modeling/linear_model/perceptron.py +107 -135
  147. snowflake/ml/modeling/linear_model/poisson_regressor.py +108 -135
  148. snowflake/ml/modeling/linear_model/ransac_regressor.py +108 -135
  149. snowflake/ml/modeling/linear_model/ridge.py +108 -135
  150. snowflake/ml/modeling/linear_model/ridge_classifier.py +108 -135
  151. snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +108 -135
  152. snowflake/ml/modeling/linear_model/ridge_cv.py +108 -135
  153. snowflake/ml/modeling/linear_model/sgd_classifier.py +108 -135
  154. snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +106 -135
  155. snowflake/ml/modeling/linear_model/sgd_regressor.py +108 -135
  156. snowflake/ml/modeling/linear_model/theil_sen_regressor.py +108 -135
  157. snowflake/ml/modeling/linear_model/tweedie_regressor.py +108 -135
  158. snowflake/ml/modeling/manifold/isomap.py +106 -135
  159. snowflake/ml/modeling/manifold/mds.py +106 -135
  160. snowflake/ml/modeling/manifold/spectral_embedding.py +106 -135
  161. snowflake/ml/modeling/manifold/tsne.py +106 -135
  162. snowflake/ml/modeling/metrics/classification.py +196 -55
  163. snowflake/ml/modeling/metrics/correlation.py +4 -2
  164. snowflake/ml/modeling/metrics/covariance.py +7 -4
  165. snowflake/ml/modeling/metrics/ranking.py +32 -16
  166. snowflake/ml/modeling/metrics/regression.py +60 -32
  167. snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +106 -135
  168. snowflake/ml/modeling/mixture/gaussian_mixture.py +106 -135
  169. snowflake/ml/modeling/model_selection/grid_search_cv.py +91 -148
  170. snowflake/ml/modeling/model_selection/randomized_search_cv.py +93 -154
  171. snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +105 -132
  172. snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +108 -135
  173. snowflake/ml/modeling/multiclass/output_code_classifier.py +108 -135
  174. snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +108 -135
  175. snowflake/ml/modeling/naive_bayes/categorical_nb.py +108 -135
  176. snowflake/ml/modeling/naive_bayes/complement_nb.py +108 -135
  177. snowflake/ml/modeling/naive_bayes/gaussian_nb.py +98 -125
  178. snowflake/ml/modeling/naive_bayes/multinomial_nb.py +107 -134
  179. snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +108 -135
  180. snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +108 -135
  181. snowflake/ml/modeling/neighbors/kernel_density.py +106 -135
  182. snowflake/ml/modeling/neighbors/local_outlier_factor.py +106 -135
  183. snowflake/ml/modeling/neighbors/nearest_centroid.py +108 -135
  184. snowflake/ml/modeling/neighbors/nearest_neighbors.py +106 -135
  185. snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +108 -135
  186. snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +108 -135
  187. snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +108 -135
  188. snowflake/ml/modeling/neural_network/bernoulli_rbm.py +106 -135
  189. snowflake/ml/modeling/neural_network/mlp_classifier.py +108 -135
  190. snowflake/ml/modeling/neural_network/mlp_regressor.py +108 -135
  191. snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
  192. snowflake/ml/modeling/preprocessing/binarizer.py +25 -8
  193. snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +9 -4
  194. snowflake/ml/modeling/preprocessing/label_encoder.py +31 -11
  195. snowflake/ml/modeling/preprocessing/max_abs_scaler.py +27 -9
  196. snowflake/ml/modeling/preprocessing/min_max_scaler.py +42 -14
  197. snowflake/ml/modeling/preprocessing/normalizer.py +9 -4
  198. snowflake/ml/modeling/preprocessing/one_hot_encoder.py +26 -10
  199. snowflake/ml/modeling/preprocessing/ordinal_encoder.py +37 -13
  200. snowflake/ml/modeling/preprocessing/polynomial_features.py +106 -135
  201. snowflake/ml/modeling/preprocessing/robust_scaler.py +39 -13
  202. snowflake/ml/modeling/preprocessing/standard_scaler.py +36 -12
  203. snowflake/ml/modeling/semi_supervised/label_propagation.py +108 -135
  204. snowflake/ml/modeling/semi_supervised/label_spreading.py +108 -135
  205. snowflake/ml/modeling/svm/linear_svc.py +108 -135
  206. snowflake/ml/modeling/svm/linear_svr.py +108 -135
  207. snowflake/ml/modeling/svm/nu_svc.py +108 -135
  208. snowflake/ml/modeling/svm/nu_svr.py +108 -135
  209. snowflake/ml/modeling/svm/svc.py +108 -135
  210. snowflake/ml/modeling/svm/svr.py +108 -135
  211. snowflake/ml/modeling/tree/decision_tree_classifier.py +108 -135
  212. snowflake/ml/modeling/tree/decision_tree_regressor.py +108 -135
  213. snowflake/ml/modeling/tree/extra_tree_classifier.py +108 -135
  214. snowflake/ml/modeling/tree/extra_tree_regressor.py +108 -135
  215. snowflake/ml/modeling/xgboost/xgb_classifier.py +108 -136
  216. snowflake/ml/modeling/xgboost/xgb_regressor.py +108 -136
  217. snowflake/ml/modeling/xgboost/xgbrf_classifier.py +108 -136
  218. snowflake/ml/modeling/xgboost/xgbrf_regressor.py +108 -136
  219. snowflake/ml/registry/model_registry.py +2 -0
  220. snowflake/ml/registry/registry.py +215 -0
  221. snowflake/ml/version.py +1 -1
  222. {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +34 -1
  223. snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
  224. snowflake_ml_python-1.1.0.dist-info/RECORD +0 -331
  225. {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
22
22
  from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
23
23
  from snowflake.ml._internal import telemetry
24
24
  from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
25
+ from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
25
26
  from snowflake.ml._internal.utils import pkg_version_utils, identifier
26
- from snowflake.snowpark import DataFrame
27
+ from snowflake.snowpark import DataFrame, Session
27
28
  from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
28
29
  from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
30
+ from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
31
+ from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
29
32
  from snowflake.ml.modeling._internal.estimator_utils import (
30
33
  gather_dependencies,
31
34
  original_estimator_has_callable,
32
35
  transform_snowml_obj_to_sklearn_obj,
33
36
  validate_sklearn_args,
34
37
  )
35
- from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
36
38
  from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers
37
39
 
38
40
  from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
52
54
  _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.neural_network".replace("sklearn.", "").split("_")])
53
55
 
54
56
 
55
-
56
57
  class MLPRegressor(BaseTransformer):
57
58
  r"""Multi-layer Perceptron regressor
58
59
  For more details on this class, see [sklearn.neural_network.MLPRegressor]
@@ -60,6 +61,51 @@ class MLPRegressor(BaseTransformer):
60
61
 
61
62
  Parameters
62
63
  ----------
64
+
65
+ input_cols: Optional[Union[str, List[str]]]
66
+ A string or list of strings representing column names that contain features.
67
+ If this parameter is not specified, all columns in the input DataFrame except
68
+ the columns specified by label_cols, sample_weight_col, and passthrough_cols
69
+ parameters are considered input columns. Input columns can also be set after
70
+ initialization with the `set_input_cols` method.
71
+
72
+ label_cols: Optional[Union[str, List[str]]]
73
+ A string or list of strings representing column names that contain labels.
74
+ Label columns must be specified with this parameter during initialization
75
+ or with the `set_label_cols` method before fitting.
76
+
77
+ output_cols: Optional[Union[str, List[str]]]
78
+ A string or list of strings representing column names that will store the
79
+ output of predict and transform operations. The length of output_cols must
80
+ match the expected number of output columns from the specific predictor or
81
+ transformer class used.
82
+ If you omit this parameter, output column names are derived by adding an
83
+ OUTPUT_ prefix to the label column names for supervised estimators, or
84
+ OUTPUT_<IDX> for unsupervised estimators. These inferred output column names
85
+ work for predictors, but output_cols must be set explicitly for transformers.
86
+ In general, explicitly specifying output column names is clearer, especially
87
+ if you don’t specify the input column names.
88
+ To transform in place, pass the same names for input_cols and output_cols.
89
+ Output columns can also be set after
90
+ initialization with the `set_output_cols` method.
91
+
92
+ sample_weight_col: Optional[str]
93
+ A string representing the column name containing the sample weights.
94
+ This argument is only required when working with weighted datasets. Sample
95
+ weight column can also be set after initialization with the
96
+ `set_sample_weight_col` method.
97
+
98
+ passthrough_cols: Optional[Union[str, List[str]]]
99
+ A string or a list of strings indicating column names to be excluded from any
100
+ operations (such as train, transform, or inference). These specified column(s)
101
+ will remain untouched throughout the process. This option is helpful in scenarios
102
+ requiring automatic input_cols inference, but need to avoid using specific
103
+ columns, like index columns, during training or inference. Passthrough columns
104
+ can also be set after initialization with the `set_passthrough_cols` method.
105
+
106
+ drop_input_cols: Optional[bool], default=False
107
+ If set, the response of predict(), transform() methods will not contain input columns.
108
+
63
109
  hidden_layer_sizes: array-like of shape(n_layers - 2,), default=(100,)
64
110
  The ith element represents the number of neurons in the ith
65
111
  hidden layer.
@@ -205,35 +251,6 @@ class MLPRegressor(BaseTransformer):
205
251
  of iterations reaches max_iter, or this number of function calls.
206
252
  Note that number of function calls will be greater than or equal to
207
253
  the number of iterations for the MLPRegressor.
208
-
209
- input_cols: Optional[Union[str, List[str]]]
210
- A string or list of strings representing column names that contain features.
211
- If this parameter is not specified, all columns in the input DataFrame except
212
- the columns specified by label_cols and sample_weight_col parameters are
213
- considered input columns.
214
-
215
- label_cols: Optional[Union[str, List[str]]]
216
- A string or list of strings representing column names that contain labels.
217
- This is a required param for estimators, as there is no way to infer these
218
- columns. If this parameter is not specified, then object is fitted without
219
- labels (like a transformer).
220
-
221
- output_cols: Optional[Union[str, List[str]]]
222
- A string or list of strings representing column names that will store the
223
- output of predict and transform operations. The length of output_cols must
224
- match the expected number of output columns from the specific estimator or
225
- transformer class used.
226
- If this parameter is not specified, output column names are derived by
227
- adding an OUTPUT_ prefix to the label column names. These inferred output
228
- column names work for estimator's predict() method, but output_cols must
229
- be set explicitly for transformers.
230
-
231
- sample_weight_col: Optional[str]
232
- A string representing the column name containing the sample weights.
233
- This argument is only required when working with weighted datasets.
234
-
235
- drop_input_cols: Optional[bool], default=False
236
- If set, the response of predict(), transform() methods will not contain input columns.
237
254
  """
238
255
 
239
256
  def __init__( # type: ignore[no-untyped-def]
@@ -265,6 +282,7 @@ class MLPRegressor(BaseTransformer):
265
282
  input_cols: Optional[Union[str, Iterable[str]]] = None,
266
283
  output_cols: Optional[Union[str, Iterable[str]]] = None,
267
284
  label_cols: Optional[Union[str, Iterable[str]]] = None,
285
+ passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
268
286
  drop_input_cols: Optional[bool] = False,
269
287
  sample_weight_col: Optional[str] = None,
270
288
  ) -> None:
@@ -273,9 +291,10 @@ class MLPRegressor(BaseTransformer):
273
291
  self.set_input_cols(input_cols)
274
292
  self.set_output_cols(output_cols)
275
293
  self.set_label_cols(label_cols)
294
+ self.set_passthrough_cols(passthrough_cols)
276
295
  self.set_drop_input_cols(drop_input_cols)
277
296
  self.set_sample_weight_col(sample_weight_col)
278
- deps = set(SklearnWrapperProvider().dependencies)
297
+ deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])
279
298
 
280
299
  self._deps = list(deps)
281
300
 
@@ -306,13 +325,14 @@ class MLPRegressor(BaseTransformer):
306
325
  args=init_args,
307
326
  klass=sklearn.neural_network.MLPRegressor
308
327
  )
309
- self._sklearn_object = sklearn.neural_network.MLPRegressor(
328
+ self._sklearn_object: Any = sklearn.neural_network.MLPRegressor(
310
329
  **cleaned_up_init_args,
311
330
  )
312
331
  self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
313
332
  # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
314
333
  self._snowpark_cols: Optional[List[str]] = self.input_cols
315
- self._handlers: FitPredictHandlers = HandlersImpl(class_name=MLPRegressor.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True, wrapper_provider=SklearnWrapperProvider())
334
+ self._handlers: FitPredictHandlers = HandlersImpl(class_name=MLPRegressor.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
335
+ self._autogenerated = True
316
336
 
317
337
  def _get_rand_id(self) -> str:
318
338
  """
@@ -323,24 +343,6 @@ class MLPRegressor(BaseTransformer):
323
343
  """
324
344
  return str(uuid4()).replace("-", "_").upper()
325
345
 
326
- def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
327
- """
328
- Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
329
-
330
- Args:
331
- dataset: Input dataset.
332
- """
333
- if not self.input_cols:
334
- cols = [
335
- c for c in dataset.columns
336
- if c not in self.get_label_cols() and c != self.sample_weight_col
337
- ]
338
- self.set_input_cols(input_cols=cols)
339
-
340
- if not self.output_cols:
341
- cols = [identifier.concat_names(ids=['OUTPUT_', c]) for c in self.label_cols]
342
- self.set_output_cols(output_cols=cols)
343
-
344
346
  def set_input_cols(self, input_cols: Optional[Union[str, Iterable[str]]]) -> "MLPRegressor":
345
347
  """
346
348
  Input columns setter.
@@ -386,54 +388,48 @@ class MLPRegressor(BaseTransformer):
386
388
  self
387
389
  """
388
390
  self._infer_input_output_cols(dataset)
389
- if isinstance(dataset, pd.DataFrame):
390
- assert self._sklearn_object is not None # keep mypy happy
391
- self._sklearn_object = self._handlers.fit_pandas(
392
- dataset,
393
- self._sklearn_object,
394
- self.input_cols,
395
- self.label_cols,
396
- self.sample_weight_col
397
- )
398
- elif isinstance(dataset, DataFrame):
399
- self._fit_snowpark(dataset)
400
- else:
401
- raise TypeError(
402
- f"Unexpected dataset type: {type(dataset)}."
403
- "Supported dataset types: snowpark.DataFrame, pandas.DataFrame."
404
- )
391
+ if isinstance(dataset, DataFrame):
392
+ session = dataset._session
393
+ assert session is not None # keep mypy happy
394
+ # Validate that key package version in user workspace are supported in snowflake conda channel
395
+ # If customer doesn't have package in conda channel, replace the ones have the closest versions
396
+ self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
397
+ pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
398
+
399
+ # Specify input columns so column pruning will be enforced
400
+ selected_cols = self._get_active_columns()
401
+ if len(selected_cols) > 0:
402
+ dataset = dataset.select(selected_cols)
403
+
404
+ self._snowpark_cols = dataset.select(self.input_cols).columns
405
+
406
+ # If we are already in a stored procedure, no need to kick off another one.
407
+ if SNOWML_SPROC_ENV in os.environ:
408
+ statement_params = telemetry.get_function_usage_statement_params(
409
+ project=_PROJECT,
410
+ subproject=_SUBPROJECT,
411
+ function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), MLPRegressor.__class__.__name__),
412
+ api_calls=[Session.call],
413
+ custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
414
+ )
415
+ pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
416
+ pd_df.columns = dataset.columns
417
+ dataset = pd_df
418
+
419
+ model_trainer = ModelTrainerBuilder.build(
420
+ estimator=self._sklearn_object,
421
+ dataset=dataset,
422
+ input_cols=self.input_cols,
423
+ label_cols=self.label_cols,
424
+ sample_weight_col=self.sample_weight_col,
425
+ autogenerated=self._autogenerated,
426
+ subproject=_SUBPROJECT
427
+ )
428
+ self._sklearn_object = model_trainer.train()
405
429
  self._is_fitted = True
406
430
  self._get_model_signatures(dataset)
407
431
  return self
408
432
 
409
- def _fit_snowpark(self, dataset: DataFrame) -> None:
410
- session = dataset._session
411
- assert session is not None # keep mypy happy
412
- # Validate that key package version in user workspace are supported in snowflake conda channel
413
- # If customer doesn't have package in conda channel, replace the ones have the closest versions
414
- self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
415
- pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
416
-
417
- # Specify input columns so column pruning will be enforced
418
- selected_cols = self._get_active_columns()
419
- if len(selected_cols) > 0:
420
- dataset = dataset.select(selected_cols)
421
-
422
- estimator = self._sklearn_object
423
- assert estimator is not None # Keep mypy happy
424
-
425
- self._snowpark_cols = dataset.select(self.input_cols).columns
426
-
427
- self._sklearn_object = self._handlers.fit_snowpark(
428
- dataset,
429
- session,
430
- estimator,
431
- ["snowflake-snowpark-python"] + self._get_dependencies(),
432
- self.input_cols,
433
- self.label_cols,
434
- self.sample_weight_col,
435
- )
436
-
437
433
  def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
438
434
  if self._drop_input_cols:
439
435
  return []
@@ -621,11 +617,6 @@ class MLPRegressor(BaseTransformer):
621
617
  subproject=_SUBPROJECT,
622
618
  custom_tags=dict([("autogen", True)]),
623
619
  )
624
- @telemetry.add_stmt_params_to_df(
625
- project=_PROJECT,
626
- subproject=_SUBPROJECT,
627
- custom_tags=dict([("autogen", True)]),
628
- )
629
620
  def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
630
621
  """Predict using the multi-layer perceptron model
631
622
  For more details on this function, see [sklearn.neural_network.MLPRegressor.predict]
@@ -679,11 +670,6 @@ class MLPRegressor(BaseTransformer):
679
670
  subproject=_SUBPROJECT,
680
671
  custom_tags=dict([("autogen", True)]),
681
672
  )
682
- @telemetry.add_stmt_params_to_df(
683
- project=_PROJECT,
684
- subproject=_SUBPROJECT,
685
- custom_tags=dict([("autogen", True)]),
686
- )
687
673
  def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
688
674
  """Method not supported for this class.
689
675
 
@@ -740,7 +726,8 @@ class MLPRegressor(BaseTransformer):
740
726
  if False:
741
727
  self.fit(dataset)
742
728
  assert self._sklearn_object is not None
743
- return self._sklearn_object.labels_
729
+ labels : npt.NDArray[Any] = self._sklearn_object.labels_
730
+ return labels
744
731
  else:
745
732
  raise NotImplementedError
746
733
 
@@ -776,6 +763,7 @@ class MLPRegressor(BaseTransformer):
776
763
  output_cols = []
777
764
 
778
765
  # Make sure column names are valid snowflake identifiers.
766
+ assert output_cols is not None # Make MyPy happy
779
767
  rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
780
768
 
781
769
  return rv
@@ -786,11 +774,6 @@ class MLPRegressor(BaseTransformer):
786
774
  subproject=_SUBPROJECT,
787
775
  custom_tags=dict([("autogen", True)]),
788
776
  )
789
- @telemetry.add_stmt_params_to_df(
790
- project=_PROJECT,
791
- subproject=_SUBPROJECT,
792
- custom_tags=dict([("autogen", True)]),
793
- )
794
777
  def predict_proba(
795
778
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
796
779
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -831,11 +814,6 @@ class MLPRegressor(BaseTransformer):
831
814
  subproject=_SUBPROJECT,
832
815
  custom_tags=dict([("autogen", True)]),
833
816
  )
834
- @telemetry.add_stmt_params_to_df(
835
- project=_PROJECT,
836
- subproject=_SUBPROJECT,
837
- custom_tags=dict([("autogen", True)]),
838
- )
839
817
  def predict_log_proba(
840
818
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
841
819
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -872,16 +850,6 @@ class MLPRegressor(BaseTransformer):
872
850
  return output_df
873
851
 
874
852
  @available_if(original_estimator_has_callable("decision_function")) # type: ignore[misc]
875
- @telemetry.send_api_usage_telemetry(
876
- project=_PROJECT,
877
- subproject=_SUBPROJECT,
878
- custom_tags=dict([("autogen", True)]),
879
- )
880
- @telemetry.add_stmt_params_to_df(
881
- project=_PROJECT,
882
- subproject=_SUBPROJECT,
883
- custom_tags=dict([("autogen", True)]),
884
- )
885
853
  def decision_function(
886
854
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
887
855
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -982,11 +950,6 @@ class MLPRegressor(BaseTransformer):
982
950
  subproject=_SUBPROJECT,
983
951
  custom_tags=dict([("autogen", True)]),
984
952
  )
985
- @telemetry.add_stmt_params_to_df(
986
- project=_PROJECT,
987
- subproject=_SUBPROJECT,
988
- custom_tags=dict([("autogen", True)]),
989
- )
990
953
  def kneighbors(
991
954
  self,
992
955
  dataset: Union[DataFrame, pd.DataFrame],
@@ -1046,18 +1009,28 @@ class MLPRegressor(BaseTransformer):
1046
1009
  # For classifier, the type of predict is the same as the type of label
1047
1010
  if self._sklearn_object._estimator_type == 'classifier':
1048
1011
  # label columns is the desired type for output
1049
- outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
1012
+ outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
1050
1013
  # rename the output columns
1051
- outputs = model_signature_utils.rename_features(outputs, self.output_cols)
1014
+ outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
1052
1015
  self._model_signature_dict["predict"] = ModelSignature(inputs,
1053
1016
  ([] if self._drop_input_cols else inputs)
1054
1017
  + outputs)
1018
+ # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
1019
+ # For outlier models, returns -1 for outliers and 1 for inliers.
1020
+ # Clusterer returns int64 cluster labels.
1021
+ elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
1022
+ outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
1023
+ self._model_signature_dict["predict"] = ModelSignature(inputs,
1024
+ ([] if self._drop_input_cols else inputs)
1025
+ + outputs)
1026
+
1055
1027
  # For regressor, the type of predict is float64
1056
1028
  elif self._sklearn_object._estimator_type == 'regressor':
1057
1029
  outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
1058
1030
  self._model_signature_dict["predict"] = ModelSignature(inputs,
1059
1031
  ([] if self._drop_input_cols else inputs)
1060
1032
  + outputs)
1033
+
1061
1034
  for prob_func in PROB_FUNCTIONS:
1062
1035
  if hasattr(self, prob_func):
1063
1036
  output_cols_prefix: str = f"{prob_func}_"
@@ -1,8 +1,4 @@
1
1
  """Disables the distributed implementation of Grid Search and Randomized Search CV"""
2
- from snowflake.ml.modeling.model_selection.grid_search_cv import GridSearchCV
3
- from snowflake.ml.modeling.model_selection.randomized_search_cv import (
4
- RandomizedSearchCV,
5
- )
2
+ from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
6
3
 
7
- GridSearchCV._ENABLE_DISTRIBUTED = False
8
- RandomizedSearchCV._ENABLE_DISTRIBUTED = False
4
+ ModelTrainerBuilder._ENABLE_DISTRIBUTED = False
@@ -21,11 +21,25 @@ class Binarizer(base.BaseTransformer):
21
21
  (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Binarizer.html).
22
22
 
23
23
  Args:
24
- threshold: Feature values below or equal to this are replaced by 0, above it by 1. Default values is 0.0.
25
- input_cols: The name(s) of one or more columns in a DataFrame containing a feature to be binarized.
26
- output_cols: The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
24
+ threshold: float, default=0.0
25
+ Feature values below or equal to this are replaced by 0, above it by 1. Default values is 0.0.
26
+
27
+ input_cols: Optional[Union[str, Iterable[str]]], default=None
28
+ The name(s) of one or more columns in a DataFrame containing a feature to be binarized.
29
+
30
+ output_cols: Optional[Union[str, Iterable[str]]], default=None
31
+ The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
27
32
  columns specified must match the number of input columns.
28
- drop_input_cols: Remove input columns from output if set True. False by default.
33
+
34
+ passthrough_cols: Optional[Union[str, Iterable[str]]], default=None
35
+ A string or a list of strings indicating column names to be excluded from any
36
+ operations (such as train, transform, or inference). These specified column(s)
37
+ will remain untouched throughout the process. This option is helpful in scenarios
38
+ requiring automatic input_cols inference, but need to avoid using specific
39
+ columns, like index columns, during training or inference.
40
+
41
+ drop_input_cols: Optional[bool], default=False
42
+ Remove input columns from output if set True. False by default.
29
43
  """
30
44
 
31
45
  def __init__(
@@ -34,6 +48,7 @@ class Binarizer(base.BaseTransformer):
34
48
  threshold: float = 0.0,
35
49
  input_cols: Optional[Union[str, Iterable[str]]] = None,
36
50
  output_cols: Optional[Union[str, Iterable[str]]] = None,
51
+ passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
37
52
  drop_input_cols: Optional[bool] = False,
38
53
  ) -> None:
39
54
  """
@@ -49,12 +64,18 @@ class Binarizer(base.BaseTransformer):
49
64
  input_cols: The name(s) of one or more columns in a DataFrame containing a feature to be binarized.
50
65
  output_cols: The name(s) of one or more columns in a DataFrame in which results will be stored. The number
51
66
  of columns specified must match the number of input columns.
67
+ passthrough_cols: A string or a list of strings indicating column names to be excluded from any
68
+ operations (such as train, transform, or inference). These specified column(s)
69
+ will remain untouched throughout the process. This option is helful in scenarios
70
+ requiring automatic input_cols inference, but need to avoid using specific
71
+ columns, like index columns, during in training or inference.
52
72
  drop_input_cols: Remove input columns from output if set True. False by default.
53
73
  """
54
74
  super().__init__(drop_input_cols=drop_input_cols)
55
75
  self.threshold = threshold
56
76
  self.set_input_cols(input_cols)
57
77
  self.set_output_cols(output_cols)
78
+ self.set_passthrough_cols(passthrough_cols)
58
79
 
59
80
  def _reset(self) -> None:
60
81
  """
@@ -96,10 +117,6 @@ class Binarizer(base.BaseTransformer):
96
117
  project=base.PROJECT,
97
118
  subproject=base.SUBPROJECT,
98
119
  )
99
- @telemetry.add_stmt_params_to_df(
100
- project=base.PROJECT,
101
- subproject=base.SUBPROJECT,
102
- )
103
120
  def transform(self, dataset: Union[snowpark.DataFrame, pd.DataFrame]) -> Union[snowpark.DataFrame, pd.DataFrame]:
104
121
  """
105
122
  Binarize the data. Map to 1 if it is strictly greater than the threshold, otherwise 0.
@@ -38,6 +38,7 @@ _SKLEARN_UNUSED_KEYWORDS = [
38
38
  _SNOWML_ONLY_KEYWORDS = [
39
39
  "input_cols",
40
40
  "output_cols",
41
+ "passthrough_cols",
41
42
  ] # snowml only keywords not present in sklearn
42
43
 
43
44
  _VALID_ENCODING_SCHEME = ["onehot", "onehot-dense", "ordinal"]
@@ -78,6 +79,12 @@ class KBinsDiscretizer(base.BaseTransformer):
78
79
  output_cols: str or Iterable [column_name], default=None
79
80
  Single or multiple output columns.
80
81
 
82
+ passthrough_cols: A string or a list of strings indicating column names to be excluded from any
83
+ operations (such as train, transform, or inference). These specified column(s)
84
+ will remain untouched throughout the process. This option is helpful in scenarios
85
+ requiring automatic input_cols inference, but need to avoid using specific
86
+ columns, like index columns, during training or inference.
87
+
81
88
  drop_input_cols: boolean, default=False
82
89
  Remove input columns from output if set True.
83
90
 
@@ -97,6 +104,7 @@ class KBinsDiscretizer(base.BaseTransformer):
97
104
  strategy: str = "quantile",
98
105
  input_cols: Optional[Union[str, Iterable[str]]] = None,
99
106
  output_cols: Optional[Union[str, Iterable[str]]] = None,
107
+ passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
100
108
  drop_input_cols: Optional[bool] = False,
101
109
  ) -> None:
102
110
  super().__init__(drop_input_cols=drop_input_cols)
@@ -105,6 +113,7 @@ class KBinsDiscretizer(base.BaseTransformer):
105
113
  self.strategy = strategy
106
114
  self.set_input_cols(input_cols)
107
115
  self.set_output_cols(output_cols)
116
+ self.set_passthrough_cols(passthrough_cols)
108
117
 
109
118
  def _enforce_params(self) -> None:
110
119
  self.n_bins = self.n_bins if isinstance(self.n_bins, Iterable) else [self.n_bins] * len(self.input_cols)
@@ -168,10 +177,6 @@ class KBinsDiscretizer(base.BaseTransformer):
168
177
  project=base.PROJECT,
169
178
  subproject=base.SUBPROJECT,
170
179
  )
171
- @telemetry.add_stmt_params_to_df(
172
- project=base.PROJECT,
173
- subproject=base.SUBPROJECT,
174
- )
175
180
  def transform(
176
181
  self, dataset: Union[snowpark.DataFrame, pd.DataFrame]
177
182
  ) -> Union[snowpark.DataFrame, pd.DataFrame, sparse.csr_matrix]:
@@ -24,30 +24,53 @@ class LabelEncoder(base.BaseTransformer):
24
24
  (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html).
25
25
 
26
26
  Args:
27
- input_cols: The name of a column in a DataFrame to be encoded. May be a string or a list containing one string.
28
- output_cols: The name of a column in a DataFrame where the results will be stored. May be a string or a list
27
+ input_cols: Optional[Union[str, List[str]]]
28
+ The name of a column in a DataFrame to be encoded. May be a string or a list containing one string.
29
+
30
+ output_cols: Optional[Union[str, List[str]]]
31
+ The name of a column in a DataFrame where the results will be stored. May be a string or a list
29
32
  containing one string.
30
- drop_input_cols: Remove input columns from output if set True. False by default.
33
+
34
+ passthrough_cols: Optional[Union[str, List[str]]]
35
+ A string or a list of strings indicating column names to be excluded from any
36
+ operations (such as train, transform, or inference). These specified column(s)
37
+ will remain untouched throughout the process. This option is helpful in scenarios
38
+ requiring automatic input_cols inference, but need to avoid using specific
39
+ columns, like index columns, during training or inference.
40
+
41
+ drop_input_cols: Optional[bool], default=False
42
+ Remove input columns from output if set True. False by default.
31
43
  """
32
44
 
33
45
  def __init__(
34
46
  self,
35
47
  input_cols: Optional[Union[str, Iterable[str]]] = None,
36
48
  output_cols: Optional[Union[str, Iterable[str]]] = None,
49
+ passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
37
50
  drop_input_cols: Optional[bool] = False,
38
51
  ) -> None:
39
52
  """
40
53
  Encode target labels with integers between 0 and n_classes-1.
41
54
 
42
55
  Args:
43
- input_cols: The name of a column in a DataFrame to be encoded. May be a string or a list containing one
56
+ input_cols: Optional[Union[str, List[str]]]
57
+ The name of a column in a DataFrame to be encoded. May be a string or a list containing one
44
58
  string.
45
- output_cols: The name of a column in a DataFrame where the results will be stored. May be a string or a list
59
+ output_cols: Optional[Union[str, List[str]]]
60
+ The name of a column in a DataFrame where the results will be stored. May be a string or a list
46
61
  containing one string.
47
- drop_input_cols: Remove input columns from output if set True. False by default.
62
+ passthrough_cols: Optional[Union[str, List[str]]]
63
+ A string or a list of strings indicating column names to be excluded from any
64
+ operations (such as train, transform, or inference). These specified column(s)
65
+ will remain untouched throughout the process. This option is helful in scenarios
66
+ requiring automatic input_cols inference, but need to avoid using specific
67
+ columns, like index columns, during in training or inference.
68
+ drop_input_cols: Optional[bool], default=False
69
+ Remove input columns from output if set True. False by default.
48
70
 
49
71
  Attributes:
50
- classes_: A np.ndarray that holds the label for each class.
72
+ classes_: Optional[type_utils.LiteralNDArrayType]
73
+ A np.ndarray that holds the label for each class.
51
74
  Attributes are valid only after fit() has been called.
52
75
 
53
76
  """
@@ -56,6 +79,7 @@ class LabelEncoder(base.BaseTransformer):
56
79
  self.classes_: Optional[type_utils.LiteralNDArrayType] = None
57
80
  self.set_input_cols(input_cols)
58
81
  self.set_output_cols(output_cols)
82
+ self.set_passthrough_cols(passthrough_cols)
59
83
 
60
84
  def _reset(self) -> None:
61
85
  super()._reset()
@@ -114,10 +138,6 @@ class LabelEncoder(base.BaseTransformer):
114
138
  project=base.PROJECT,
115
139
  subproject=base.SUBPROJECT,
116
140
  )
117
- @telemetry.add_stmt_params_to_df(
118
- project=base.PROJECT,
119
- subproject=base.SUBPROJECT,
120
- )
121
141
  def transform(self, dataset: Union[snowpark.DataFrame, pd.DataFrame]) -> Union[snowpark.DataFrame, pd.DataFrame]:
122
142
  """
123
143
  Use fit result to transform snowpark dataframe or pandas dataframe. The original dataset with