snowflake-ml-python 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (225)
  1. snowflake/cortex/_complete.py +1 -1
  2. snowflake/cortex/_extract_answer.py +1 -1
  3. snowflake/cortex/_sentiment.py +1 -1
  4. snowflake/cortex/_summarize.py +1 -1
  5. snowflake/cortex/_translate.py +1 -1
  6. snowflake/ml/_internal/env_utils.py +68 -6
  7. snowflake/ml/_internal/file_utils.py +34 -4
  8. snowflake/ml/_internal/telemetry.py +79 -91
  9. snowflake/ml/_internal/utils/identifier.py +78 -72
  10. snowflake/ml/_internal/utils/retryable_http.py +16 -4
  11. snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
  12. snowflake/ml/dataset/dataset.py +1 -1
  13. snowflake/ml/model/_api.py +21 -14
  14. snowflake/ml/model/_client/model/model_impl.py +176 -0
  15. snowflake/ml/model/_client/model/model_method_info.py +19 -0
  16. snowflake/ml/model/_client/model/model_version_impl.py +291 -0
  17. snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
  18. snowflake/ml/model/_client/ops/model_ops.py +308 -0
  19. snowflake/ml/model/_client/sql/model.py +75 -0
  20. snowflake/ml/model/_client/sql/model_version.py +213 -0
  21. snowflake/ml/model/_client/sql/stage.py +40 -0
  22. snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
  23. snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
  24. snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
  25. snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
  26. snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
  27. snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
  28. snowflake/ml/model/_model_composer/model_composer.py +31 -9
  29. snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
  30. snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
  31. snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
  32. snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
  33. snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
  34. snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
  35. snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
  36. snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
  37. snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
  38. snowflake/ml/model/model_signature.py +108 -53
  39. snowflake/ml/model/type_hints.py +1 -0
  40. snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
  41. snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
  42. snowflake/ml/modeling/_internal/model_specifications.py +146 -0
  43. snowflake/ml/modeling/_internal/model_trainer.py +13 -0
  44. snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
  45. snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
  46. snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
  47. snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
  48. snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +108 -135
  49. snowflake/ml/modeling/cluster/affinity_propagation.py +106 -135
  50. snowflake/ml/modeling/cluster/agglomerative_clustering.py +106 -135
  51. snowflake/ml/modeling/cluster/birch.py +106 -135
  52. snowflake/ml/modeling/cluster/bisecting_k_means.py +106 -135
  53. snowflake/ml/modeling/cluster/dbscan.py +106 -135
  54. snowflake/ml/modeling/cluster/feature_agglomeration.py +106 -135
  55. snowflake/ml/modeling/cluster/k_means.py +105 -135
  56. snowflake/ml/modeling/cluster/mean_shift.py +106 -135
  57. snowflake/ml/modeling/cluster/mini_batch_k_means.py +105 -135
  58. snowflake/ml/modeling/cluster/optics.py +106 -135
  59. snowflake/ml/modeling/cluster/spectral_biclustering.py +106 -135
  60. snowflake/ml/modeling/cluster/spectral_clustering.py +106 -135
  61. snowflake/ml/modeling/cluster/spectral_coclustering.py +106 -135
  62. snowflake/ml/modeling/compose/column_transformer.py +106 -135
  63. snowflake/ml/modeling/compose/transformed_target_regressor.py +108 -135
  64. snowflake/ml/modeling/covariance/elliptic_envelope.py +106 -135
  65. snowflake/ml/modeling/covariance/empirical_covariance.py +99 -128
  66. snowflake/ml/modeling/covariance/graphical_lasso.py +106 -135
  67. snowflake/ml/modeling/covariance/graphical_lasso_cv.py +106 -135
  68. snowflake/ml/modeling/covariance/ledoit_wolf.py +104 -133
  69. snowflake/ml/modeling/covariance/min_cov_det.py +106 -135
  70. snowflake/ml/modeling/covariance/oas.py +99 -128
  71. snowflake/ml/modeling/covariance/shrunk_covariance.py +103 -132
  72. snowflake/ml/modeling/decomposition/dictionary_learning.py +106 -135
  73. snowflake/ml/modeling/decomposition/factor_analysis.py +106 -135
  74. snowflake/ml/modeling/decomposition/fast_ica.py +106 -135
  75. snowflake/ml/modeling/decomposition/incremental_pca.py +106 -135
  76. snowflake/ml/modeling/decomposition/kernel_pca.py +106 -135
  77. snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +106 -135
  78. snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +106 -135
  79. snowflake/ml/modeling/decomposition/pca.py +106 -135
  80. snowflake/ml/modeling/decomposition/sparse_pca.py +106 -135
  81. snowflake/ml/modeling/decomposition/truncated_svd.py +106 -135
  82. snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +108 -135
  83. snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +108 -135
  84. snowflake/ml/modeling/ensemble/ada_boost_classifier.py +108 -135
  85. snowflake/ml/modeling/ensemble/ada_boost_regressor.py +108 -135
  86. snowflake/ml/modeling/ensemble/bagging_classifier.py +108 -135
  87. snowflake/ml/modeling/ensemble/bagging_regressor.py +108 -135
  88. snowflake/ml/modeling/ensemble/extra_trees_classifier.py +108 -135
  89. snowflake/ml/modeling/ensemble/extra_trees_regressor.py +108 -135
  90. snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +108 -135
  91. snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +108 -135
  92. snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +108 -135
  93. snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +108 -135
  94. snowflake/ml/modeling/ensemble/isolation_forest.py +106 -135
  95. snowflake/ml/modeling/ensemble/random_forest_classifier.py +108 -135
  96. snowflake/ml/modeling/ensemble/random_forest_regressor.py +108 -135
  97. snowflake/ml/modeling/ensemble/stacking_regressor.py +108 -135
  98. snowflake/ml/modeling/ensemble/voting_classifier.py +108 -135
  99. snowflake/ml/modeling/ensemble/voting_regressor.py +108 -135
  100. snowflake/ml/modeling/feature_selection/generic_univariate_select.py +101 -128
  101. snowflake/ml/modeling/feature_selection/select_fdr.py +99 -126
  102. snowflake/ml/modeling/feature_selection/select_fpr.py +99 -126
  103. snowflake/ml/modeling/feature_selection/select_fwe.py +99 -126
  104. snowflake/ml/modeling/feature_selection/select_k_best.py +100 -127
  105. snowflake/ml/modeling/feature_selection/select_percentile.py +99 -126
  106. snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +106 -135
  107. snowflake/ml/modeling/feature_selection/variance_threshold.py +95 -124
  108. snowflake/ml/modeling/framework/base.py +83 -1
  109. snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +108 -135
  110. snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +108 -135
  111. snowflake/ml/modeling/impute/iterative_imputer.py +106 -135
  112. snowflake/ml/modeling/impute/knn_imputer.py +106 -135
  113. snowflake/ml/modeling/impute/missing_indicator.py +106 -135
  114. snowflake/ml/modeling/impute/simple_imputer.py +9 -1
  115. snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +96 -125
  116. snowflake/ml/modeling/kernel_approximation/nystroem.py +106 -135
  117. snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +106 -135
  118. snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +105 -134
  119. snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +103 -132
  120. snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +108 -135
  121. snowflake/ml/modeling/lightgbm/lgbm_classifier.py +90 -118
  122. snowflake/ml/modeling/lightgbm/lgbm_regressor.py +90 -118
  123. snowflake/ml/modeling/linear_model/ard_regression.py +108 -135
  124. snowflake/ml/modeling/linear_model/bayesian_ridge.py +108 -135
  125. snowflake/ml/modeling/linear_model/elastic_net.py +108 -135
  126. snowflake/ml/modeling/linear_model/elastic_net_cv.py +108 -135
  127. snowflake/ml/modeling/linear_model/gamma_regressor.py +108 -135
  128. snowflake/ml/modeling/linear_model/huber_regressor.py +108 -135
  129. snowflake/ml/modeling/linear_model/lars.py +108 -135
  130. snowflake/ml/modeling/linear_model/lars_cv.py +108 -135
  131. snowflake/ml/modeling/linear_model/lasso.py +108 -135
  132. snowflake/ml/modeling/linear_model/lasso_cv.py +108 -135
  133. snowflake/ml/modeling/linear_model/lasso_lars.py +108 -135
  134. snowflake/ml/modeling/linear_model/lasso_lars_cv.py +108 -135
  135. snowflake/ml/modeling/linear_model/lasso_lars_ic.py +108 -135
  136. snowflake/ml/modeling/linear_model/linear_regression.py +108 -135
  137. snowflake/ml/modeling/linear_model/logistic_regression.py +108 -135
  138. snowflake/ml/modeling/linear_model/logistic_regression_cv.py +108 -135
  139. snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +108 -135
  140. snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +108 -135
  141. snowflake/ml/modeling/linear_model/multi_task_lasso.py +108 -135
  142. snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +108 -135
  143. snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +108 -135
  144. snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +108 -135
  145. snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +107 -135
  146. snowflake/ml/modeling/linear_model/perceptron.py +107 -135
  147. snowflake/ml/modeling/linear_model/poisson_regressor.py +108 -135
  148. snowflake/ml/modeling/linear_model/ransac_regressor.py +108 -135
  149. snowflake/ml/modeling/linear_model/ridge.py +108 -135
  150. snowflake/ml/modeling/linear_model/ridge_classifier.py +108 -135
  151. snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +108 -135
  152. snowflake/ml/modeling/linear_model/ridge_cv.py +108 -135
  153. snowflake/ml/modeling/linear_model/sgd_classifier.py +108 -135
  154. snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +106 -135
  155. snowflake/ml/modeling/linear_model/sgd_regressor.py +108 -135
  156. snowflake/ml/modeling/linear_model/theil_sen_regressor.py +108 -135
  157. snowflake/ml/modeling/linear_model/tweedie_regressor.py +108 -135
  158. snowflake/ml/modeling/manifold/isomap.py +106 -135
  159. snowflake/ml/modeling/manifold/mds.py +106 -135
  160. snowflake/ml/modeling/manifold/spectral_embedding.py +106 -135
  161. snowflake/ml/modeling/manifold/tsne.py +106 -135
  162. snowflake/ml/modeling/metrics/classification.py +196 -55
  163. snowflake/ml/modeling/metrics/correlation.py +4 -2
  164. snowflake/ml/modeling/metrics/covariance.py +7 -4
  165. snowflake/ml/modeling/metrics/ranking.py +32 -16
  166. snowflake/ml/modeling/metrics/regression.py +60 -32
  167. snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +106 -135
  168. snowflake/ml/modeling/mixture/gaussian_mixture.py +106 -135
  169. snowflake/ml/modeling/model_selection/grid_search_cv.py +91 -148
  170. snowflake/ml/modeling/model_selection/randomized_search_cv.py +93 -154
  171. snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +105 -132
  172. snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +108 -135
  173. snowflake/ml/modeling/multiclass/output_code_classifier.py +108 -135
  174. snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +108 -135
  175. snowflake/ml/modeling/naive_bayes/categorical_nb.py +108 -135
  176. snowflake/ml/modeling/naive_bayes/complement_nb.py +108 -135
  177. snowflake/ml/modeling/naive_bayes/gaussian_nb.py +98 -125
  178. snowflake/ml/modeling/naive_bayes/multinomial_nb.py +107 -134
  179. snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +108 -135
  180. snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +108 -135
  181. snowflake/ml/modeling/neighbors/kernel_density.py +106 -135
  182. snowflake/ml/modeling/neighbors/local_outlier_factor.py +106 -135
  183. snowflake/ml/modeling/neighbors/nearest_centroid.py +108 -135
  184. snowflake/ml/modeling/neighbors/nearest_neighbors.py +106 -135
  185. snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +108 -135
  186. snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +108 -135
  187. snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +108 -135
  188. snowflake/ml/modeling/neural_network/bernoulli_rbm.py +106 -135
  189. snowflake/ml/modeling/neural_network/mlp_classifier.py +108 -135
  190. snowflake/ml/modeling/neural_network/mlp_regressor.py +108 -135
  191. snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
  192. snowflake/ml/modeling/preprocessing/binarizer.py +25 -8
  193. snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +9 -4
  194. snowflake/ml/modeling/preprocessing/label_encoder.py +31 -11
  195. snowflake/ml/modeling/preprocessing/max_abs_scaler.py +27 -9
  196. snowflake/ml/modeling/preprocessing/min_max_scaler.py +42 -14
  197. snowflake/ml/modeling/preprocessing/normalizer.py +9 -4
  198. snowflake/ml/modeling/preprocessing/one_hot_encoder.py +26 -10
  199. snowflake/ml/modeling/preprocessing/ordinal_encoder.py +37 -13
  200. snowflake/ml/modeling/preprocessing/polynomial_features.py +106 -135
  201. snowflake/ml/modeling/preprocessing/robust_scaler.py +39 -13
  202. snowflake/ml/modeling/preprocessing/standard_scaler.py +36 -12
  203. snowflake/ml/modeling/semi_supervised/label_propagation.py +108 -135
  204. snowflake/ml/modeling/semi_supervised/label_spreading.py +108 -135
  205. snowflake/ml/modeling/svm/linear_svc.py +108 -135
  206. snowflake/ml/modeling/svm/linear_svr.py +108 -135
  207. snowflake/ml/modeling/svm/nu_svc.py +108 -135
  208. snowflake/ml/modeling/svm/nu_svr.py +108 -135
  209. snowflake/ml/modeling/svm/svc.py +108 -135
  210. snowflake/ml/modeling/svm/svr.py +108 -135
  211. snowflake/ml/modeling/tree/decision_tree_classifier.py +108 -135
  212. snowflake/ml/modeling/tree/decision_tree_regressor.py +108 -135
  213. snowflake/ml/modeling/tree/extra_tree_classifier.py +108 -135
  214. snowflake/ml/modeling/tree/extra_tree_regressor.py +108 -135
  215. snowflake/ml/modeling/xgboost/xgb_classifier.py +108 -136
  216. snowflake/ml/modeling/xgboost/xgb_regressor.py +108 -136
  217. snowflake/ml/modeling/xgboost/xgbrf_classifier.py +108 -136
  218. snowflake/ml/modeling/xgboost/xgbrf_regressor.py +108 -136
  219. snowflake/ml/registry/model_registry.py +2 -0
  220. snowflake/ml/registry/registry.py +215 -0
  221. snowflake/ml/version.py +1 -1
  222. {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +34 -1
  223. snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
  224. snowflake_ml_python-1.1.0.dist-info/RECORD +0 -331
  225. {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
  from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
  from snowflake.ml._internal import telemetry
  from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+ from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
  from snowflake.ml._internal.utils import pkg_version_utils, identifier
- from snowflake.snowpark import DataFrame
+ from snowflake.snowpark import DataFrame, Session
  from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
  from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+ from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+ from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
  from snowflake.ml.modeling._internal.estimator_utils import (
      gather_dependencies,
      original_estimator_has_callable,
      transform_snowml_obj_to_sklearn_obj,
      validate_sklearn_args,
  )
- from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
  from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers

  from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
  _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.preprocessing".replace("sklearn.", "").split("_")])


-
  class PolynomialFeatures(BaseTransformer):
      r"""Generate polynomial and interaction features
      For more details on this class, see [sklearn.preprocessing.PolynomialFeatures]
@@ -60,6 +61,49 @@ class PolynomialFeatures(BaseTransformer):

  Parameters
  ----------
+
+ input_cols: Optional[Union[str, List[str]]]
+     A string or list of strings representing column names that contain features.
+     If this parameter is not specified, all columns in the input DataFrame except
+     the columns specified by the label_cols, sample_weight_col, and passthrough_cols
+     parameters are considered input columns. Input columns can also be set after
+     initialization with the `set_input_cols` method.
+
+ label_cols: Optional[Union[str, List[str]]]
+     This parameter is optional and will be ignored during fit. It is present here for API consistency by convention.
+
+ output_cols: Optional[Union[str, List[str]]]
+     A string or list of strings representing column names that will store the
+     output of predict and transform operations. The length of output_cols must
+     match the expected number of output columns from the specific predictor or
+     transformer class used.
+     If you omit this parameter, output column names are derived by adding an
+     OUTPUT_ prefix to the label column names for supervised estimators, or
+     OUTPUT_<IDX> for unsupervised estimators. These inferred output column names
+     work for predictors, but output_cols must be set explicitly for transformers.
+     In general, explicitly specifying output column names is clearer, especially
+     if you don't specify the input column names.
+     To transform in place, pass the same names for input_cols and output_cols.
+     Output columns can also be set after initialization with the
+     `set_output_cols` method.
+
+ sample_weight_col: Optional[str]
+     A string representing the column name containing the sample weights.
+     This argument is only required when working with weighted datasets. The sample
+     weight column can also be set after initialization with the
+     `set_sample_weight_col` method.
+
+ passthrough_cols: Optional[Union[str, List[str]]]
+     A string or a list of strings indicating column names to be excluded from any
+     operations (such as train, transform, or inference). These specified column(s)
+     will remain untouched throughout the process. This option is helpful in scenarios
+     that require automatic input_cols inference but must avoid using specific
+     columns, like index columns, during training or inference. Passthrough columns
+     can also be set after initialization with the `set_passthrough_cols` method.
+
+ drop_input_cols: Optional[bool], default=False
+     If set, the response of predict(), transform() methods will not contain input columns.
+
  degree: int or tuple (min_degree, max_degree), default=2
      If a single int is given, it specifies the maximal degree of the
      polynomial features. If a tuple `(min_degree, max_degree)` is passed,
@@ -84,35 +128,6 @@ class PolynomialFeatures(BaseTransformer):
  order: {'C', 'F'}, default='C'
      Order of output array in the dense case. `'F'` order is faster to
      compute, but may slow down subsequent estimators.
-
- input_cols: Optional[Union[str, List[str]]]
-     A string or list of strings representing column names that contain features.
-     If this parameter is not specified, all columns in the input DataFrame except
-     the columns specified by label_cols and sample_weight_col parameters are
-     considered input columns.
-
- label_cols: Optional[Union[str, List[str]]]
-     A string or list of strings representing column names that contain labels.
-     This is a required param for estimators, as there is no way to infer these
-     columns. If this parameter is not specified, then object is fitted without
-     labels (like a transformer).
-
- output_cols: Optional[Union[str, List[str]]]
-     A string or list of strings representing column names that will store the
-     output of predict and transform operations. The length of output_cols must
-     match the expected number of output columns from the specific estimator or
-     transformer class used.
-     If this parameter is not specified, output column names are derived by
-     adding an OUTPUT_ prefix to the label column names. These inferred output
-     column names work for estimator's predict() method, but output_cols must
-     be set explicitly for transformers.
-
- sample_weight_col: Optional[str]
-     A string representing the column name containing the sample weights.
-     This argument is only required when working with weighted datasets.
-
- drop_input_cols: Optional[bool], default=False
-     If set, the response of predict(), transform() methods will not contain input columns.
  """

  def __init__(  # type: ignore[no-untyped-def]
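To illustrate the relocated parameter docs above, a minimal usage sketch (the DataFrame `df` and the "ID" column are hypothetical; `passthrough_cols` is the argument added in this release):

    from snowflake.ml.modeling.preprocessing import PolynomialFeatures

    # "ID" is excluded from input-column inference and left untouched.
    poly = PolynomialFeatures(degree=2, passthrough_cols=["ID"])

    # Accepts a snowpark.DataFrame or a pandas.DataFrame. As the docstring
    # notes, output_cols must be set explicitly before calling transform()
    # on a transformer like this one.
    poly.fit(df)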
@@ -125,6 +140,7 @@ class PolynomialFeatures(BaseTransformer):
      input_cols: Optional[Union[str, Iterable[str]]] = None,
      output_cols: Optional[Union[str, Iterable[str]]] = None,
      label_cols: Optional[Union[str, Iterable[str]]] = None,
+     passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
      drop_input_cols: Optional[bool] = False,
      sample_weight_col: Optional[str] = None,
  ) -> None:
@@ -133,9 +149,10 @@ class PolynomialFeatures(BaseTransformer):
      self.set_input_cols(input_cols)
      self.set_output_cols(output_cols)
      self.set_label_cols(label_cols)
+     self.set_passthrough_cols(passthrough_cols)
      self.set_drop_input_cols(drop_input_cols)
      self.set_sample_weight_col(sample_weight_col)
-     deps = set(SklearnWrapperProvider().dependencies)
+     deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])

      self._deps = list(deps)

@@ -147,13 +164,14 @@ class PolynomialFeatures(BaseTransformer):
          args=init_args,
          klass=sklearn.preprocessing.PolynomialFeatures
      )
-     self._sklearn_object = sklearn.preprocessing.PolynomialFeatures(
+     self._sklearn_object: Any = sklearn.preprocessing.PolynomialFeatures(
          **cleaned_up_init_args,
      )
      self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
      # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
      self._snowpark_cols: Optional[List[str]] = self.input_cols
-     self._handlers: FitPredictHandlers = HandlersImpl(class_name=PolynomialFeatures.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True, wrapper_provider=SklearnWrapperProvider())
+     self._handlers: FitPredictHandlers = HandlersImpl(class_name=PolynomialFeatures.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+     self._autogenerated = True

  def _get_rand_id(self) -> str:
      """
@@ -164,24 +182,6 @@ class PolynomialFeatures(BaseTransformer):
      """
      return str(uuid4()).replace("-", "_").upper()

- def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
-     """
-     Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
-
-     Args:
-         dataset: Input dataset.
-     """
-     if not self.input_cols:
-         cols = [
-             c for c in dataset.columns
-             if c not in self.get_label_cols() and c != self.sample_weight_col
-         ]
-         self.set_input_cols(input_cols=cols)
-
-     if not self.output_cols:
-         cols = [identifier.concat_names(ids=['OUTPUT_', c]) for c in self.label_cols]
-         self.set_output_cols(output_cols=cols)
-
  def set_input_cols(self, input_cols: Optional[Union[str, Iterable[str]]]) -> "PolynomialFeatures":
      """
      Input columns setter.
@@ -227,54 +227,48 @@ class PolynomialFeatures(BaseTransformer):
          self
      """
      self._infer_input_output_cols(dataset)
-     if isinstance(dataset, pd.DataFrame):
-         assert self._sklearn_object is not None  # keep mypy happy
-         self._sklearn_object = self._handlers.fit_pandas(
-             dataset,
-             self._sklearn_object,
-             self.input_cols,
-             self.label_cols,
-             self.sample_weight_col
-         )
-     elif isinstance(dataset, DataFrame):
-         self._fit_snowpark(dataset)
-     else:
-         raise TypeError(
-             f"Unexpected dataset type: {type(dataset)}."
-             "Supported dataset types: snowpark.DataFrame, pandas.DataFrame."
-         )
+     if isinstance(dataset, DataFrame):
+         session = dataset._session
+         assert session is not None  # keep mypy happy
+         # Validate that key package version in user workspace are supported in snowflake conda channel
+         # If customer doesn't have package in conda channel, replace the ones have the closest versions
+         self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+             pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+         # Specify input columns so column pruning will be enforced
+         selected_cols = self._get_active_columns()
+         if len(selected_cols) > 0:
+             dataset = dataset.select(selected_cols)
+
+         self._snowpark_cols = dataset.select(self.input_cols).columns
+
+         # If we are already in a stored procedure, no need to kick off another one.
+         if SNOWML_SPROC_ENV in os.environ:
+             statement_params = telemetry.get_function_usage_statement_params(
+                 project=_PROJECT,
+                 subproject=_SUBPROJECT,
+                 function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), PolynomialFeatures.__class__.__name__),
+                 api_calls=[Session.call],
+                 custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+             )
+             pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+             pd_df.columns = dataset.columns
+             dataset = pd_df
+
+     model_trainer = ModelTrainerBuilder.build(
+         estimator=self._sklearn_object,
+         dataset=dataset,
+         input_cols=self.input_cols,
+         label_cols=self.label_cols,
+         sample_weight_col=self.sample_weight_col,
+         autogenerated=self._autogenerated,
+         subproject=_SUBPROJECT
+     )
+     self._sklearn_object = model_trainer.train()
      self._is_fitted = True
      self._get_model_signatures(dataset)
      return self

- def _fit_snowpark(self, dataset: DataFrame) -> None:
-     session = dataset._session
-     assert session is not None  # keep mypy happy
-     # Validate that key package version in user workspace are supported in snowflake conda channel
-     # If customer doesn't have package in conda channel, replace the ones have the closest versions
-     self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-         pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-     # Specify input columns so column pruning will be enforced
-     selected_cols = self._get_active_columns()
-     if len(selected_cols) > 0:
-         dataset = dataset.select(selected_cols)
-
-     estimator = self._sklearn_object
-     assert estimator is not None  # Keep mypy happy
-
-     self._snowpark_cols = dataset.select(self.input_cols).columns
-
-     self._sklearn_object = self._handlers.fit_snowpark(
-         dataset,
-         session,
-         estimator,
-         ["snowflake-snowpark-python"] + self._get_dependencies(),
-         self.input_cols,
-         self.label_cols,
-         self.sample_weight_col,
-     )
-
  def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
      if self._drop_input_cols:
          return []
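The net effect of this hunk: the old fit_pandas/_fit_snowpark split is folded into a single path that hands training to ModelTrainerBuilder, which selects a local trainer for pandas input and a stored-procedure trainer for Snowpark input. A hedged sketch of the two entry points (the `session` object and table name are hypothetical):

    import pandas as pd
    from snowflake.ml.modeling.preprocessing import PolynomialFeatures

    poly = PolynomialFeatures(degree=2)

    # pandas path: ModelTrainerBuilder routes to the local (pandas) trainer,
    # replacing the old fit_pandas() branch.
    poly.fit(pd.DataFrame({"X1": [1.0, 2.0, 3.0], "X2": [4.0, 5.0, 6.0]}))

    # Snowpark path: dependency versions are validated against the Snowflake
    # conda channel and training runs in a stored procedure. If SNOWML_SPROC_ENV
    # is set (already inside a sproc), the frame is converted to pandas first,
    # as shown in the hunk above.
    poly.fit(session.table("FEATURES"))  # `session` is an existing snowpark.Session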
@@ -462,11 +456,6 @@ class PolynomialFeatures(BaseTransformer):
      subproject=_SUBPROJECT,
      custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
-     project=_PROJECT,
-     subproject=_SUBPROJECT,
-     custom_tags=dict([("autogen", True)]),
- )
  def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
      """Method not supported for this class.
@@ -518,11 +507,6 @@ class PolynomialFeatures(BaseTransformer):
      subproject=_SUBPROJECT,
      custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
-     project=_PROJECT,
-     subproject=_SUBPROJECT,
-     custom_tags=dict([("autogen", True)]),
- )
  def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
      """Transform data to polynomial features
      For more details on this function, see [sklearn.preprocessing.PolynomialFeatures.transform]
@@ -581,7 +565,8 @@ class PolynomialFeatures(BaseTransformer):
      if False:
          self.fit(dataset)
          assert self._sklearn_object is not None
-         return self._sklearn_object.labels_
+         labels: npt.NDArray[Any] = self._sklearn_object.labels_
+         return labels
      else:
          raise NotImplementedError
@@ -617,6 +602,7 @@ class PolynomialFeatures(BaseTransformer):
      output_cols = []

      # Make sure column names are valid snowflake identifiers.
+     assert output_cols is not None  # Make MyPy happy
      rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]

      return rv
@@ -627,11 +613,6 @@ class PolynomialFeatures(BaseTransformer):
      subproject=_SUBPROJECT,
      custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
-     project=_PROJECT,
-     subproject=_SUBPROJECT,
-     custom_tags=dict([("autogen", True)]),
- )
  def predict_proba(
      self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -672,11 +653,6 @@ class PolynomialFeatures(BaseTransformer):
      subproject=_SUBPROJECT,
      custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
-     project=_PROJECT,
-     subproject=_SUBPROJECT,
-     custom_tags=dict([("autogen", True)]),
- )
  def predict_log_proba(
      self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -713,16 +689,6 @@ class PolynomialFeatures(BaseTransformer):
      return output_df

  @available_if(original_estimator_has_callable("decision_function"))  # type: ignore[misc]
- @telemetry.send_api_usage_telemetry(
-     project=_PROJECT,
-     subproject=_SUBPROJECT,
-     custom_tags=dict([("autogen", True)]),
- )
- @telemetry.add_stmt_params_to_df(
-     project=_PROJECT,
-     subproject=_SUBPROJECT,
-     custom_tags=dict([("autogen", True)]),
- )
  def decision_function(
      self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -821,11 +787,6 @@ class PolynomialFeatures(BaseTransformer):
      subproject=_SUBPROJECT,
      custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
-     project=_PROJECT,
-     subproject=_SUBPROJECT,
-     custom_tags=dict([("autogen", True)]),
- )
  def kneighbors(
      self,
      dataset: Union[DataFrame, pd.DataFrame],
@@ -885,18 +846,28 @@ class PolynomialFeatures(BaseTransformer):
      # For classifier, the type of predict is the same as the type of label
      if self._sklearn_object._estimator_type == 'classifier':
          # label columns is the desired type for output
-         outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+         outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
          # rename the output columns
-         outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+         outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
+         self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                ([] if self._drop_input_cols else inputs)
+                                                                + outputs)
+     # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
+     # For outlier models, returns -1 for outliers and 1 for inliers.
+     # Clusterer returns int64 cluster labels.
+     elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
+         outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
          self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                 ([] if self._drop_input_cols else inputs)
                                                                 + outputs)
+
      # For regressor, the type of predict is float64
      elif self._sklearn_object._estimator_type == 'regressor':
          outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
          self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                 ([] if self._drop_input_cols else inputs)
                                                                 + outputs)
+
      for prob_func in PROB_FUNCTIONS:
          if hasattr(self, prob_func):
              output_cols_prefix: str = f"{prob_func}_"
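To make the signature construction above concrete, a small sketch building the same objects by hand (feature names are hypothetical; the classes come from snowflake.ml.model.model_signature, imported at the top of this module):

    from snowflake.ml.model.model_signature import DataType, FeatureSpec, ModelSignature

    inputs = [
        FeatureSpec(dtype=DataType.DOUBLE, name="X1"),
        FeatureSpec(dtype=DataType.DOUBLE, name="X2"),
    ]
    # Regressors map predict() output to DOUBLE; clusterers, outlier detectors,
    # and density estimators map to INT64, per the branches above.
    outputs = [FeatureSpec(dtype=DataType.DOUBLE, name="OUTPUT_Y")]

    # Unless drop_input_cols is set, inputs are echoed on the output side.
    sig = ModelSignature(inputs=inputs, outputs=inputs + outputs)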
@@ -20,23 +20,46 @@ class RobustScaler(base.BaseTransformer):
  (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html).

  Args:
- with_centering: If True, center the data around zero before scaling.
- with_scaling: If True, scale the data to interquartile range.
- quantile_range: tuple like (q_min, q_max), where 0.0 < q_min < q_max < 100.0, default=(25.0, 75.0). Quantile
+ with_centering: bool, default=True
+     If True, center the data around zero before scaling.
+
+ with_scaling: bool, default=True
+     If True, scale the data to interquartile range.
+
+ quantile_range: Tuple[float, float], default=(25.0, 75.0)
+     Tuple like (q_min, q_max), where 0.0 < q_min < q_max < 100.0. Quantile
      range used to calculate scale_. By default, this is equal to the IQR, i.e., q_min is the first quantile and
      q_max is the third quantile.
- unit_variance: If True, scale data so that normally-distributed features have a variance of 1. In general, if
+
+ unit_variance: bool, default=False
+     If True, scale data so that normally-distributed features have a variance of 1. In general, if
      the difference between the x-values of q_max and q_min for a standard normal distribution is greater than 1,
      the dataset is scaled down. If less than 1, the dataset is scaled up.
- input_cols: The name(s) of one or more columns in a DataFrame containing a feature to be scaled.
- output_cols: The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
+
+ input_cols: Optional[Union[str, List[str]]], default=None
+     The name(s) of one or more columns in a DataFrame containing a feature to be scaled.
+
+ output_cols: Optional[Union[str, List[str]]], default=None
+     The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
      columns specified must match the number of input columns. For dense output, the column names specified are
      used as base names for the columns created for each category.
- drop_input_cols: Remove input columns from output if set True. False by default.
+
+ passthrough_cols: Optional[Union[str, List[str]]], default=None
+     A string or a list of strings indicating column names to be excluded from any
+     operations (such as train, transform, or inference). These specified column(s)
+     will remain untouched throughout the process. This option is helpful in scenarios
+     that require automatic input_cols inference but must avoid using specific
+     columns, like index columns, during training or inference.
+
+ drop_input_cols: Optional[bool], default=False
+     Remove input columns from output if set True. False by default.

  Attributes:
- center_: Dictionary mapping input column name to the median value for that feature.
- scale_: Dictionary mapping input column name to the (scaled) interquartile range for that feature.
+ center_: Dict[str, float]
+     Dictionary mapping input column name to the median value for that feature.
+
+ scale_: Dict[str, float]
+     Dictionary mapping input column name to the (scaled) interquartile range for that feature.
  """

  def __init__(
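A short usage sketch of the arguments and fitted attributes documented above (the DataFrame and column names are hypothetical; `passthrough_cols` is new in this release):

    from snowflake.ml.modeling.preprocessing import RobustScaler

    scaler = RobustScaler(
        quantile_range=(25.0, 75.0),   # the default IQR documented above
        input_cols=["AMOUNT"],
        output_cols=["AMOUNT_SCALED"],
        passthrough_cols=["ID"],       # excluded from all operations
    )
    scaler.fit(df)                     # snowpark.DataFrame or pandas.DataFrame
    scaled = scaler.transform(df)
    # Per-column median and scaled IQR, keyed by input column name.
    print(scaler.center_["AMOUNT"], scaler.scale_["AMOUNT"])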
@@ -48,6 +71,7 @@ class RobustScaler(base.BaseTransformer):
      unit_variance: bool = False,
      input_cols: Optional[Union[str, Iterable[str]]] = None,
      output_cols: Optional[Union[str, Iterable[str]]] = None,
+     passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
      drop_input_cols: Optional[bool] = False,
  ) -> None:
      """
@@ -68,6 +92,11 @@ class RobustScaler(base.BaseTransformer):
          If less than 1, the dataset will be scaled up.
      input_cols: Single or multiple input columns.
      output_cols: Single or multiple output columns.
+     passthrough_cols: A string or a list of strings indicating column names to be excluded from any
+         operations (such as train, transform, or inference). These specified column(s)
+         will remain untouched throughout the process. This option is helpful in scenarios
+         that require automatic input_cols inference but must avoid using specific
+         columns, like index columns, during training or inference.
      drop_input_cols: Remove input columns from output if set True. False by default.

  Attributes:
@@ -95,6 +124,7 @@ class RobustScaler(base.BaseTransformer):

      self.set_input_cols(input_cols)
      self.set_output_cols(output_cols)
+     self.set_passthrough_cols(passthrough_cols)

  def _reset(self) -> None:
      """
@@ -187,10 +217,6 @@ class RobustScaler(base.BaseTransformer):
      project=base.PROJECT,
      subproject=base.SUBPROJECT,
  )
- @telemetry.add_stmt_params_to_df(
-     project=base.PROJECT,
-     subproject=base.SUBPROJECT,
- )
  def transform(self, dataset: Union[snowpark.DataFrame, pd.DataFrame]) -> Union[snowpark.DataFrame, pd.DataFrame]:
      """
      Center and scale the data.
@@ -19,19 +19,40 @@ class StandardScaler(base.BaseTransformer):
  (https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html).

  Args:
- with_mean: If True, center the data before scaling.
- with_std: If True, scale the data unit variance (i.e. unit standard deviation).
- input_cols: The name(s) of one or more columns in a DataFrame containing a feature to be scaled.
- output_cols: The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
+ with_mean: bool, default=True
+     If True, center the data before scaling.
+
+ with_std: bool, default=True
+     If True, scale the data to unit variance (i.e. unit standard deviation).
+
+ input_cols: Optional[Union[str, List[str]]], default=None
+     The name(s) of one or more columns in a DataFrame containing a feature to be scaled.
+
+ output_cols: Optional[Union[str, List[str]]], default=None
+     The name(s) of one or more columns in a DataFrame in which results will be stored. The number of
      columns specified must match the number of input columns.
- drop_input_cols: Remove input columns from output if set True. False by default.
+
+ passthrough_cols: Optional[Union[str, List[str]]], default=None
+     A string or a list of strings indicating column names to be excluded from any
+     operations (such as train, transform, or inference). These specified column(s)
+     will remain untouched throughout the process. This option is helpful in scenarios
+     that require automatic input_cols inference but must avoid using specific
+     columns, like index columns, during training or inference.
+
+ drop_input_cols: Optional[bool], default=False
+     Remove input columns from output if set True. False by default.

  Attributes:
- scale_: Dictionary mapping input column names to relative scaling factor to achieve zero mean and unit variance.
+ scale_: Optional[Dict[str, float]] = {}
+     Dictionary mapping input column names to relative scaling factor to achieve zero mean and unit variance.
      If a variance is zero, unit variance could not be achieved, and the data is left as-is, giving a scaling
      factor of 1. None if with_std is False.
- mean_: Dictionary mapping input column name to the mean value for that feature. None if with_mean is False.
- var_: Dictionary mapping input column name to the variance for that feature. Used to compute scale_. None if
+
+ mean_: Optional[Dict[str, float]] = {}
+     Dictionary mapping input column name to the mean value for that feature. None if with_mean is False.
+
+ var_: Optional[Dict[str, float]] = {}
+     Dictionary mapping input column name to the variance for that feature. Used to compute scale_. None if
      with_std is False
  """

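And the analogous sketch for StandardScaler (again with hypothetical names):

    from snowflake.ml.modeling.preprocessing import StandardScaler

    scaler = StandardScaler(
        with_mean=True,
        with_std=True,
        input_cols=["AMOUNT"],
        output_cols=["AMOUNT_STD"],
        passthrough_cols=["ID"],
    )
    scaler.fit(df)
    standardized = scaler.transform(df)
    # mean_, var_, and scale_ are dicts keyed by input column name
    # (None when with_mean/with_std disables them).
    print(scaler.mean_, scaler.var_, scaler.scale_)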
@@ -42,6 +63,7 @@ class StandardScaler(base.BaseTransformer):
      with_std: bool = True,
      input_cols: Optional[Union[str, Iterable[str]]] = None,
      output_cols: Optional[Union[str, Iterable[str]]] = None,
+     passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
      drop_input_cols: Optional[bool] = False,
  ) -> None:
      """
@@ -57,6 +79,11 @@ class StandardScaler(base.BaseTransformer):
          unit standard deviation).
      input_cols: Single or multiple input columns.
      output_cols: Single or multiple output columns.
+     passthrough_cols: A string or a list of strings indicating column names to be excluded from any
+         operations (such as train, transform, or inference). These specified column(s)
+         will remain untouched throughout the process. This option is helpful in scenarios
+         that require automatic input_cols inference but must avoid using specific
+         columns, like index columns, during training or inference.
      drop_input_cols: Remove input columns from output if set True. False by default.

  Attributes:
@@ -90,6 +117,7 @@ class StandardScaler(base.BaseTransformer):

      self.set_input_cols(input_cols)
      self.set_output_cols(output_cols)
+     self.set_passthrough_cols(passthrough_cols)

  def _reset(self) -> None:
      """
@@ -165,10 +193,6 @@ class StandardScaler(base.BaseTransformer):
      project=base.PROJECT,
      subproject=base.SUBPROJECT,
  )
- @telemetry.add_stmt_params_to_df(
-     project=base.PROJECT,
-     subproject=base.SUBPROJECT,
- )
  def transform(self, dataset: Union[snowpark.DataFrame, pd.DataFrame]) -> Union[snowpark.DataFrame, pd.DataFrame]:
      """
      Perform standardization by centering and scaling.