snowflake-ml-python 1.1.1__py3-none-any.whl → 1.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (224)
  1. snowflake/cortex/_complete.py +1 -1
  2. snowflake/cortex/_extract_answer.py +1 -1
  3. snowflake/cortex/_sentiment.py +1 -1
  4. snowflake/cortex/_summarize.py +1 -1
  5. snowflake/cortex/_translate.py +1 -1
  6. snowflake/ml/_internal/env_utils.py +68 -6
  7. snowflake/ml/_internal/file_utils.py +34 -4
  8. snowflake/ml/_internal/telemetry.py +79 -91
  9. snowflake/ml/_internal/utils/retryable_http.py +16 -4
  10. snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
  11. snowflake/ml/dataset/dataset.py +1 -1
  12. snowflake/ml/model/_api.py +21 -14
  13. snowflake/ml/model/_client/model/model_impl.py +176 -0
  14. snowflake/ml/model/_client/model/model_method_info.py +19 -0
  15. snowflake/ml/model/_client/model/model_version_impl.py +291 -0
  16. snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
  17. snowflake/ml/model/_client/ops/model_ops.py +308 -0
  18. snowflake/ml/model/_client/sql/model.py +75 -0
  19. snowflake/ml/model/_client/sql/model_version.py +213 -0
  20. snowflake/ml/model/_client/sql/stage.py +40 -0
  21. snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
  22. snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
  23. snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
  24. snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
  25. snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
  26. snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
  27. snowflake/ml/model/_model_composer/model_composer.py +31 -9
  28. snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
  29. snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
  30. snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
  31. snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
  32. snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
  33. snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
  34. snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
  35. snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
  36. snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
  37. snowflake/ml/model/model_signature.py +108 -53
  38. snowflake/ml/model/type_hints.py +1 -0
  39. snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
  40. snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
  41. snowflake/ml/modeling/_internal/model_specifications.py +146 -0
  42. snowflake/ml/modeling/_internal/model_trainer.py +13 -0
  43. snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
  44. snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
  45. snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
  46. snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
  47. snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +96 -124
  48. snowflake/ml/modeling/cluster/affinity_propagation.py +94 -124
  49. snowflake/ml/modeling/cluster/agglomerative_clustering.py +94 -124
  50. snowflake/ml/modeling/cluster/birch.py +94 -124
  51. snowflake/ml/modeling/cluster/bisecting_k_means.py +94 -124
  52. snowflake/ml/modeling/cluster/dbscan.py +94 -124
  53. snowflake/ml/modeling/cluster/feature_agglomeration.py +94 -124
  54. snowflake/ml/modeling/cluster/k_means.py +93 -124
  55. snowflake/ml/modeling/cluster/mean_shift.py +94 -124
  56. snowflake/ml/modeling/cluster/mini_batch_k_means.py +93 -124
  57. snowflake/ml/modeling/cluster/optics.py +94 -124
  58. snowflake/ml/modeling/cluster/spectral_biclustering.py +94 -124
  59. snowflake/ml/modeling/cluster/spectral_clustering.py +94 -124
  60. snowflake/ml/modeling/cluster/spectral_coclustering.py +94 -124
  61. snowflake/ml/modeling/compose/column_transformer.py +94 -124
  62. snowflake/ml/modeling/compose/transformed_target_regressor.py +96 -124
  63. snowflake/ml/modeling/covariance/elliptic_envelope.py +94 -124
  64. snowflake/ml/modeling/covariance/empirical_covariance.py +80 -110
  65. snowflake/ml/modeling/covariance/graphical_lasso.py +94 -124
  66. snowflake/ml/modeling/covariance/graphical_lasso_cv.py +94 -124
  67. snowflake/ml/modeling/covariance/ledoit_wolf.py +85 -115
  68. snowflake/ml/modeling/covariance/min_cov_det.py +94 -124
  69. snowflake/ml/modeling/covariance/oas.py +80 -110
  70. snowflake/ml/modeling/covariance/shrunk_covariance.py +84 -114
  71. snowflake/ml/modeling/decomposition/dictionary_learning.py +94 -124
  72. snowflake/ml/modeling/decomposition/factor_analysis.py +94 -124
  73. snowflake/ml/modeling/decomposition/fast_ica.py +94 -124
  74. snowflake/ml/modeling/decomposition/incremental_pca.py +94 -124
  75. snowflake/ml/modeling/decomposition/kernel_pca.py +94 -124
  76. snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +94 -124
  77. snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +94 -124
  78. snowflake/ml/modeling/decomposition/pca.py +94 -124
  79. snowflake/ml/modeling/decomposition/sparse_pca.py +94 -124
  80. snowflake/ml/modeling/decomposition/truncated_svd.py +94 -124
  81. snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +96 -124
  82. snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +91 -119
  83. snowflake/ml/modeling/ensemble/ada_boost_classifier.py +96 -124
  84. snowflake/ml/modeling/ensemble/ada_boost_regressor.py +96 -124
  85. snowflake/ml/modeling/ensemble/bagging_classifier.py +96 -124
  86. snowflake/ml/modeling/ensemble/bagging_regressor.py +96 -124
  87. snowflake/ml/modeling/ensemble/extra_trees_classifier.py +96 -124
  88. snowflake/ml/modeling/ensemble/extra_trees_regressor.py +96 -124
  89. snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +96 -124
  90. snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +96 -124
  91. snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +96 -124
  92. snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +96 -124
  93. snowflake/ml/modeling/ensemble/isolation_forest.py +94 -124
  94. snowflake/ml/modeling/ensemble/random_forest_classifier.py +96 -124
  95. snowflake/ml/modeling/ensemble/random_forest_regressor.py +96 -124
  96. snowflake/ml/modeling/ensemble/stacking_regressor.py +96 -124
  97. snowflake/ml/modeling/ensemble/voting_classifier.py +96 -124
  98. snowflake/ml/modeling/ensemble/voting_regressor.py +91 -119
  99. snowflake/ml/modeling/feature_selection/generic_univariate_select.py +82 -110
  100. snowflake/ml/modeling/feature_selection/select_fdr.py +80 -108
  101. snowflake/ml/modeling/feature_selection/select_fpr.py +80 -108
  102. snowflake/ml/modeling/feature_selection/select_fwe.py +80 -108
  103. snowflake/ml/modeling/feature_selection/select_k_best.py +81 -109
  104. snowflake/ml/modeling/feature_selection/select_percentile.py +80 -108
  105. snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +94 -124
  106. snowflake/ml/modeling/feature_selection/variance_threshold.py +76 -106
  107. snowflake/ml/modeling/framework/base.py +2 -2
  108. snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +96 -124
  109. snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +96 -124
  110. snowflake/ml/modeling/impute/iterative_imputer.py +94 -124
  111. snowflake/ml/modeling/impute/knn_imputer.py +94 -124
  112. snowflake/ml/modeling/impute/missing_indicator.py +94 -124
  113. snowflake/ml/modeling/impute/simple_imputer.py +1 -1
  114. snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +77 -107
  115. snowflake/ml/modeling/kernel_approximation/nystroem.py +94 -124
  116. snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +94 -124
  117. snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +86 -116
  118. snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +84 -114
  119. snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +96 -124
  120. snowflake/ml/modeling/lightgbm/lgbm_classifier.py +71 -100
  121. snowflake/ml/modeling/lightgbm/lgbm_regressor.py +71 -100
  122. snowflake/ml/modeling/linear_model/ard_regression.py +96 -124
  123. snowflake/ml/modeling/linear_model/bayesian_ridge.py +96 -124
  124. snowflake/ml/modeling/linear_model/elastic_net.py +96 -124
  125. snowflake/ml/modeling/linear_model/elastic_net_cv.py +96 -124
  126. snowflake/ml/modeling/linear_model/gamma_regressor.py +96 -124
  127. snowflake/ml/modeling/linear_model/huber_regressor.py +96 -124
  128. snowflake/ml/modeling/linear_model/lars.py +96 -124
  129. snowflake/ml/modeling/linear_model/lars_cv.py +96 -124
  130. snowflake/ml/modeling/linear_model/lasso.py +96 -124
  131. snowflake/ml/modeling/linear_model/lasso_cv.py +96 -124
  132. snowflake/ml/modeling/linear_model/lasso_lars.py +96 -124
  133. snowflake/ml/modeling/linear_model/lasso_lars_cv.py +96 -124
  134. snowflake/ml/modeling/linear_model/lasso_lars_ic.py +96 -124
  135. snowflake/ml/modeling/linear_model/linear_regression.py +91 -119
  136. snowflake/ml/modeling/linear_model/logistic_regression.py +96 -124
  137. snowflake/ml/modeling/linear_model/logistic_regression_cv.py +96 -124
  138. snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +96 -124
  139. snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +96 -124
  140. snowflake/ml/modeling/linear_model/multi_task_lasso.py +96 -124
  141. snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +96 -124
  142. snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +96 -124
  143. snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +96 -124
  144. snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +95 -124
  145. snowflake/ml/modeling/linear_model/perceptron.py +95 -124
  146. snowflake/ml/modeling/linear_model/poisson_regressor.py +96 -124
  147. snowflake/ml/modeling/linear_model/ransac_regressor.py +96 -124
  148. snowflake/ml/modeling/linear_model/ridge.py +96 -124
  149. snowflake/ml/modeling/linear_model/ridge_classifier.py +96 -124
  150. snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +96 -124
  151. snowflake/ml/modeling/linear_model/ridge_cv.py +96 -124
  152. snowflake/ml/modeling/linear_model/sgd_classifier.py +96 -124
  153. snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +94 -124
  154. snowflake/ml/modeling/linear_model/sgd_regressor.py +96 -124
  155. snowflake/ml/modeling/linear_model/theil_sen_regressor.py +96 -124
  156. snowflake/ml/modeling/linear_model/tweedie_regressor.py +96 -124
  157. snowflake/ml/modeling/manifold/isomap.py +94 -124
  158. snowflake/ml/modeling/manifold/mds.py +94 -124
  159. snowflake/ml/modeling/manifold/spectral_embedding.py +94 -124
  160. snowflake/ml/modeling/manifold/tsne.py +94 -124
  161. snowflake/ml/modeling/metrics/classification.py +187 -52
  162. snowflake/ml/modeling/metrics/correlation.py +4 -2
  163. snowflake/ml/modeling/metrics/covariance.py +7 -4
  164. snowflake/ml/modeling/metrics/ranking.py +32 -16
  165. snowflake/ml/modeling/metrics/regression.py +60 -32
  166. snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +94 -124
  167. snowflake/ml/modeling/mixture/gaussian_mixture.py +94 -124
  168. snowflake/ml/modeling/model_selection/grid_search_cv.py +88 -138
  169. snowflake/ml/modeling/model_selection/randomized_search_cv.py +90 -144
  170. snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +86 -114
  171. snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +93 -121
  172. snowflake/ml/modeling/multiclass/output_code_classifier.py +94 -122
  173. snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +92 -120
  174. snowflake/ml/modeling/naive_bayes/categorical_nb.py +96 -124
  175. snowflake/ml/modeling/naive_bayes/complement_nb.py +92 -120
  176. snowflake/ml/modeling/naive_bayes/gaussian_nb.py +79 -107
  177. snowflake/ml/modeling/naive_bayes/multinomial_nb.py +88 -116
  178. snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +96 -124
  179. snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +96 -124
  180. snowflake/ml/modeling/neighbors/kernel_density.py +94 -124
  181. snowflake/ml/modeling/neighbors/local_outlier_factor.py +94 -124
  182. snowflake/ml/modeling/neighbors/nearest_centroid.py +89 -117
  183. snowflake/ml/modeling/neighbors/nearest_neighbors.py +94 -124
  184. snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +96 -124
  185. snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +96 -124
  186. snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +96 -124
  187. snowflake/ml/modeling/neural_network/bernoulli_rbm.py +94 -124
  188. snowflake/ml/modeling/neural_network/mlp_classifier.py +96 -124
  189. snowflake/ml/modeling/neural_network/mlp_regressor.py +96 -124
  190. snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
  191. snowflake/ml/modeling/preprocessing/binarizer.py +14 -9
  192. snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +0 -4
  193. snowflake/ml/modeling/preprocessing/label_encoder.py +21 -13
  194. snowflake/ml/modeling/preprocessing/max_abs_scaler.py +20 -14
  195. snowflake/ml/modeling/preprocessing/min_max_scaler.py +35 -19
  196. snowflake/ml/modeling/preprocessing/normalizer.py +6 -9
  197. snowflake/ml/modeling/preprocessing/one_hot_encoder.py +20 -13
  198. snowflake/ml/modeling/preprocessing/ordinal_encoder.py +25 -13
  199. snowflake/ml/modeling/preprocessing/polynomial_features.py +94 -124
  200. snowflake/ml/modeling/preprocessing/robust_scaler.py +28 -14
  201. snowflake/ml/modeling/preprocessing/standard_scaler.py +25 -13
  202. snowflake/ml/modeling/semi_supervised/label_propagation.py +96 -124
  203. snowflake/ml/modeling/semi_supervised/label_spreading.py +96 -124
  204. snowflake/ml/modeling/svm/linear_svc.py +96 -124
  205. snowflake/ml/modeling/svm/linear_svr.py +96 -124
  206. snowflake/ml/modeling/svm/nu_svc.py +96 -124
  207. snowflake/ml/modeling/svm/nu_svr.py +96 -124
  208. snowflake/ml/modeling/svm/svc.py +96 -124
  209. snowflake/ml/modeling/svm/svr.py +96 -124
  210. snowflake/ml/modeling/tree/decision_tree_classifier.py +96 -124
  211. snowflake/ml/modeling/tree/decision_tree_regressor.py +96 -124
  212. snowflake/ml/modeling/tree/extra_tree_classifier.py +96 -124
  213. snowflake/ml/modeling/tree/extra_tree_regressor.py +96 -124
  214. snowflake/ml/modeling/xgboost/xgb_classifier.py +96 -125
  215. snowflake/ml/modeling/xgboost/xgb_regressor.py +96 -125
  216. snowflake/ml/modeling/xgboost/xgbrf_classifier.py +96 -125
  217. snowflake/ml/modeling/xgboost/xgbrf_regressor.py +96 -125
  218. snowflake/ml/registry/model_registry.py +2 -0
  219. snowflake/ml/registry/registry.py +215 -0
  220. snowflake/ml/version.py +1 -1
  221. {snowflake_ml_python-1.1.1.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +21 -3
  222. snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
  223. snowflake_ml_python-1.1.1.dist-info/RECORD +0 -331
  224. {snowflake_ml_python-1.1.1.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
@@ -23,17 +23,19 @@ from sklearn.utils.metaestimators import available_if
23
23
  from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
24
24
  from snowflake.ml._internal import telemetry
25
25
  from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
26
+ from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
26
27
  from snowflake.ml._internal.utils import pkg_version_utils, identifier
27
- from snowflake.snowpark import DataFrame
28
+ from snowflake.snowpark import DataFrame, Session
28
29
  from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
29
30
  from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
31
+ from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
32
+ from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
30
33
  from snowflake.ml.modeling._internal.estimator_utils import (
31
34
  gather_dependencies,
32
35
  original_estimator_has_callable,
33
36
  transform_snowml_obj_to_sklearn_obj,
34
37
  validate_sklearn_args,
35
38
  )
36
- from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
37
39
  from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers
38
40
 
39
41
  from snowflake.ml.model.model_signature import (
@@ -53,7 +55,6 @@ _PROJECT = "ModelDevelopment"
53
55
  _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.feature_selection".replace("sklearn.", "").split("_")])
54
56
 
55
57
 
56
-
57
58
  class GenericUnivariateSelect(BaseTransformer):
58
59
  r"""Univariate feature selector with configurable strategy
59
60
  For more details on this class, see [sklearn.feature_selection.GenericUnivariateSelect]
@@ -61,52 +62,61 @@ class GenericUnivariateSelect(BaseTransformer):
61
62
 
62
63
  Parameters
63
64
  ----------
64
- score_func: callable, default=f_classif
65
- Function taking two arrays X and y, and returning a pair of arrays
66
- (scores, pvalues). For modes 'percentile' or 'kbest' it can return
67
- a single array scores.
68
-
69
- mode: {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}, default='percentile'
70
- Feature selection mode.
71
-
72
- param: "all", float or int, default=1e-5
73
- Parameter of the corresponding mode.
74
65
 
75
66
  input_cols: Optional[Union[str, List[str]]]
76
67
  A string or list of strings representing column names that contain features.
77
68
  If this parameter is not specified, all columns in the input DataFrame except
78
69
  the columns specified by label_cols, sample_weight_col, and passthrough_cols
79
- parameters are considered input columns.
80
-
70
+ parameters are considered input columns. Input columns can also be set after
71
+ initialization with the `set_input_cols` method.
72
+
81
73
  label_cols: Optional[Union[str, List[str]]]
82
74
  A string or list of strings representing column names that contain labels.
83
- This is a required param for estimators, as there is no way to infer these
84
- columns. If this parameter is not specified, then object is fitted without
85
- labels (like a transformer).
75
+ Label columns must be specified with this parameter during initialization
76
+ or with the `set_label_cols` method before fitting.
86
77
 
87
78
  output_cols: Optional[Union[str, List[str]]]
88
79
  A string or list of strings representing column names that will store the
89
80
  output of predict and transform operations. The length of output_cols must
90
- match the expected number of output columns from the specific estimator or
81
+ match the expected number of output columns from the specific predictor or
91
82
  transformer class used.
92
- If this parameter is not specified, output column names are derived by
93
- adding an OUTPUT_ prefix to the label column names. These inferred output
94
- column names work for estimator's predict() method, but output_cols must
95
- be set explicitly for transformers.
83
+ If you omit this parameter, output column names are derived by adding an
84
+ OUTPUT_ prefix to the label column names for supervised estimators, or
85
+ OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
86
+ work for predictors, but output_cols must be set explicitly for transformers.
87
+ In general, explicitly specifying output column names is clearer, especially
88
+ if you don’t specify the input column names.
89
+ To transform in place, pass the same names for input_cols and output_cols.
90
+ be set explicitly for transformers. Output columns can also be set after
91
+ initialization with the `set_output_cols` method.
96
92
 
97
93
  sample_weight_col: Optional[str]
98
94
  A string representing the column name containing the sample weights.
99
- This argument is only required when working with weighted datasets.
95
+ This argument is only required when working with weighted datasets. Sample
96
+ weight column can also be set after initialization with the
97
+ `set_sample_weight_col` method.
100
98
 
101
99
  passthrough_cols: Optional[Union[str, List[str]]]
102
100
  A string or a list of strings indicating column names to be excluded from any
103
101
  operations (such as train, transform, or inference). These specified column(s)
104
102
  will remain untouched throughout the process. This option is helpful in scenarios
105
103
  requiring automatic input_cols inference, but need to avoid using specific
106
- columns, like index columns, during training or inference.
104
+ columns, like index columns, during training or inference. Passthrough columns
105
+ can also be set after initialization with the `set_passthrough_cols` method.
107
106
 
108
107
  drop_input_cols: Optional[bool], default=False
109
108
  If set, the response of predict(), transform() methods will not contain input columns.
109
+
110
+ score_func: callable, default=f_classif
111
+ Function taking two arrays X and y, and returning a pair of arrays
112
+ (scores, pvalues). For modes 'percentile' or 'kbest' it can return
113
+ a single array scores.
114
+
115
+ mode: {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}, default='percentile'
116
+ Feature selection mode.
117
+
118
+ param: "all", float or int, default=1e-5
119
+ Parameter of the corresponding mode.
110
120
  """
111
121
 
112
122
  def __init__( # type: ignore[no-untyped-def]
@@ -130,7 +140,7 @@ class GenericUnivariateSelect(BaseTransformer):
130
140
  self.set_passthrough_cols(passthrough_cols)
131
141
  self.set_drop_input_cols(drop_input_cols)
132
142
  self.set_sample_weight_col(sample_weight_col)
133
- deps = set(SklearnWrapperProvider().dependencies)
143
+ deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])
134
144
 
135
145
  self._deps = list(deps)
136
146
 
@@ -141,13 +151,14 @@ class GenericUnivariateSelect(BaseTransformer):
141
151
  args=init_args,
142
152
  klass=sklearn.feature_selection.GenericUnivariateSelect
143
153
  )
144
- self._sklearn_object = sklearn.feature_selection.GenericUnivariateSelect(
154
+ self._sklearn_object: Any = sklearn.feature_selection.GenericUnivariateSelect(
145
155
  **cleaned_up_init_args,
146
156
  )
147
157
  self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
148
158
  # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
149
159
  self._snowpark_cols: Optional[List[str]] = self.input_cols
150
- self._handlers: FitPredictHandlers = HandlersImpl(class_name=GenericUnivariateSelect.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True, wrapper_provider=SklearnWrapperProvider())
160
+ self._handlers: FitPredictHandlers = HandlersImpl(class_name=GenericUnivariateSelect.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
161
+ self._autogenerated = True
151
162
 
152
163
  def _get_rand_id(self) -> str:
153
164
  """
@@ -203,54 +214,48 @@ class GenericUnivariateSelect(BaseTransformer):
203
214
  self
204
215
  """
205
216
  self._infer_input_output_cols(dataset)
206
- if isinstance(dataset, pd.DataFrame):
207
- assert self._sklearn_object is not None # keep mypy happy
208
- self._sklearn_object = self._handlers.fit_pandas(
209
- dataset,
210
- self._sklearn_object,
211
- self.input_cols,
212
- self.label_cols,
213
- self.sample_weight_col
214
- )
215
- elif isinstance(dataset, DataFrame):
216
- self._fit_snowpark(dataset)
217
- else:
218
- raise TypeError(
219
- f"Unexpected dataset type: {type(dataset)}."
220
- "Supported dataset types: snowpark.DataFrame, pandas.DataFrame."
221
- )
217
+ if isinstance(dataset, DataFrame):
218
+ session = dataset._session
219
+ assert session is not None # keep mypy happy
220
+ # Validate that key package version in user workspace are supported in snowflake conda channel
221
+ # If customer doesn't have package in conda channel, replace the ones have the closest versions
222
+ self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
223
+ pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
224
+
225
+ # Specify input columns so column pruning will be enforced
226
+ selected_cols = self._get_active_columns()
227
+ if len(selected_cols) > 0:
228
+ dataset = dataset.select(selected_cols)
229
+
230
+ self._snowpark_cols = dataset.select(self.input_cols).columns
231
+
232
+ # If we are already in a stored procedure, no need to kick off another one.
233
+ if SNOWML_SPROC_ENV in os.environ:
234
+ statement_params = telemetry.get_function_usage_statement_params(
235
+ project=_PROJECT,
236
+ subproject=_SUBPROJECT,
237
+ function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), GenericUnivariateSelect.__class__.__name__),
238
+ api_calls=[Session.call],
239
+ custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
240
+ )
241
+ pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
242
+ pd_df.columns = dataset.columns
243
+ dataset = pd_df
244
+
245
+ model_trainer = ModelTrainerBuilder.build(
246
+ estimator=self._sklearn_object,
247
+ dataset=dataset,
248
+ input_cols=self.input_cols,
249
+ label_cols=self.label_cols,
250
+ sample_weight_col=self.sample_weight_col,
251
+ autogenerated=self._autogenerated,
252
+ subproject=_SUBPROJECT
253
+ )
254
+ self._sklearn_object = model_trainer.train()
222
255
  self._is_fitted = True
223
256
  self._get_model_signatures(dataset)
224
257
  return self
225
258
 
226
- def _fit_snowpark(self, dataset: DataFrame) -> None:
227
- session = dataset._session
228
- assert session is not None # keep mypy happy
229
- # Validate that key package version in user workspace are supported in snowflake conda channel
230
- # If customer doesn't have package in conda channel, replace the ones have the closest versions
231
- self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
232
- pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
233
-
234
- # Specify input columns so column pruning will be enforced
235
- selected_cols = self._get_active_columns()
236
- if len(selected_cols) > 0:
237
- dataset = dataset.select(selected_cols)
238
-
239
- estimator = self._sklearn_object
240
- assert estimator is not None # Keep mypy happy
241
-
242
- self._snowpark_cols = dataset.select(self.input_cols).columns
243
-
244
- self._sklearn_object = self._handlers.fit_snowpark(
245
- dataset,
246
- session,
247
- estimator,
248
- ["snowflake-snowpark-python"] + self._get_dependencies(),
249
- self.input_cols,
250
- self.label_cols,
251
- self.sample_weight_col,
252
- )
253
-
254
259
  def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
255
260
  if self._drop_input_cols:
256
261
  return []
@@ -438,11 +443,6 @@ class GenericUnivariateSelect(BaseTransformer):
438
443
  subproject=_SUBPROJECT,
439
444
  custom_tags=dict([("autogen", True)]),
440
445
  )
441
- @telemetry.add_stmt_params_to_df(
442
- project=_PROJECT,
443
- subproject=_SUBPROJECT,
444
- custom_tags=dict([("autogen", True)]),
445
- )
446
446
  def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
447
447
  """Method not supported for this class.
448
448
 
@@ -494,11 +494,6 @@ class GenericUnivariateSelect(BaseTransformer):
494
494
  subproject=_SUBPROJECT,
495
495
  custom_tags=dict([("autogen", True)]),
496
496
  )
497
- @telemetry.add_stmt_params_to_df(
498
- project=_PROJECT,
499
- subproject=_SUBPROJECT,
500
- custom_tags=dict([("autogen", True)]),
501
- )
502
497
  def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
503
498
  """Reduce X to the selected features
504
499
  For more details on this function, see [sklearn.feature_selection.GenericUnivariateSelect.transform]
@@ -557,7 +552,8 @@ class GenericUnivariateSelect(BaseTransformer):
557
552
  if False:
558
553
  self.fit(dataset)
559
554
  assert self._sklearn_object is not None
560
- return self._sklearn_object.labels_
555
+ labels : npt.NDArray[Any] = self._sklearn_object.labels_
556
+ return labels
561
557
  else:
562
558
  raise NotImplementedError
563
559
 
@@ -593,6 +589,7 @@ class GenericUnivariateSelect(BaseTransformer):
593
589
  output_cols = []
594
590
 
595
591
  # Make sure column names are valid snowflake identifiers.
592
+ assert output_cols is not None # Make MyPy happy
596
593
  rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
597
594
 
598
595
  return rv
@@ -603,11 +600,6 @@ class GenericUnivariateSelect(BaseTransformer):
603
600
  subproject=_SUBPROJECT,
604
601
  custom_tags=dict([("autogen", True)]),
605
602
  )
606
- @telemetry.add_stmt_params_to_df(
607
- project=_PROJECT,
608
- subproject=_SUBPROJECT,
609
- custom_tags=dict([("autogen", True)]),
610
- )
611
603
  def predict_proba(
612
604
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
613
605
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -648,11 +640,6 @@ class GenericUnivariateSelect(BaseTransformer):
648
640
  subproject=_SUBPROJECT,
649
641
  custom_tags=dict([("autogen", True)]),
650
642
  )
651
- @telemetry.add_stmt_params_to_df(
652
- project=_PROJECT,
653
- subproject=_SUBPROJECT,
654
- custom_tags=dict([("autogen", True)]),
655
- )
656
643
  def predict_log_proba(
657
644
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
658
645
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -689,16 +676,6 @@ class GenericUnivariateSelect(BaseTransformer):
689
676
  return output_df
690
677
 
691
678
  @available_if(original_estimator_has_callable("decision_function")) # type: ignore[misc]
692
- @telemetry.send_api_usage_telemetry(
693
- project=_PROJECT,
694
- subproject=_SUBPROJECT,
695
- custom_tags=dict([("autogen", True)]),
696
- )
697
- @telemetry.add_stmt_params_to_df(
698
- project=_PROJECT,
699
- subproject=_SUBPROJECT,
700
- custom_tags=dict([("autogen", True)]),
701
- )
702
679
  def decision_function(
703
680
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
704
681
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -797,11 +774,6 @@ class GenericUnivariateSelect(BaseTransformer):
797
774
  subproject=_SUBPROJECT,
798
775
  custom_tags=dict([("autogen", True)]),
799
776
  )
800
- @telemetry.add_stmt_params_to_df(
801
- project=_PROJECT,
802
- subproject=_SUBPROJECT,
803
- custom_tags=dict([("autogen", True)]),
804
- )
805
777
  def kneighbors(
806
778
  self,
807
779
  dataset: Union[DataFrame, pd.DataFrame],
@@ -861,9 +833,9 @@ class GenericUnivariateSelect(BaseTransformer):
861
833
  # For classifier, the type of predict is the same as the type of label
862
834
  if self._sklearn_object._estimator_type == 'classifier':
863
835
  # label columns is the desired type for output
864
- outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
836
+ outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
865
837
  # rename the output columns
866
- outputs = model_signature_utils.rename_features(outputs, self.output_cols)
838
+ outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
867
839
  self._model_signature_dict["predict"] = ModelSignature(inputs,
868
840
  ([] if self._drop_input_cols else inputs)
869
841
  + outputs)
@@ -23,17 +23,19 @@ from sklearn.utils.metaestimators import available_if
23
23
  from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
24
24
  from snowflake.ml._internal import telemetry
25
25
  from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
26
+ from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
26
27
  from snowflake.ml._internal.utils import pkg_version_utils, identifier
27
- from snowflake.snowpark import DataFrame
28
+ from snowflake.snowpark import DataFrame, Session
28
29
  from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
29
30
  from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
31
+ from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
32
+ from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
30
33
  from snowflake.ml.modeling._internal.estimator_utils import (
31
34
  gather_dependencies,
32
35
  original_estimator_has_callable,
33
36
  transform_snowml_obj_to_sklearn_obj,
34
37
  validate_sklearn_args,
35
38
  )
36
- from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
37
39
  from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers
38
40
 
39
41
  from snowflake.ml.model.model_signature import (
@@ -53,7 +55,6 @@ _PROJECT = "ModelDevelopment"
53
55
  _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.feature_selection".replace("sklearn.", "").split("_")])
54
56
 
55
57
 
56
-
57
58
  class SelectFdr(BaseTransformer):
58
59
  r"""Filter: Select the p-values for an estimated false discovery rate
59
60
  For more details on this class, see [sklearn.feature_selection.SelectFdr]
@@ -61,50 +62,59 @@ class SelectFdr(BaseTransformer):
61
62
 
62
63
  Parameters
63
64
  ----------
64
- score_func: callable, default=f_classif
65
- Function taking two arrays X and y, and returning a pair of arrays
66
- (scores, pvalues).
67
- Default is f_classif (see below "See Also"). The default function only
68
- works with classification tasks.
69
-
70
- alpha: float, default=5e-2
71
- The highest uncorrected p-value for features to keep.
72
65
 
73
66
  input_cols: Optional[Union[str, List[str]]]
74
67
  A string or list of strings representing column names that contain features.
75
68
  If this parameter is not specified, all columns in the input DataFrame except
76
69
  the columns specified by label_cols, sample_weight_col, and passthrough_cols
77
- parameters are considered input columns.
78
-
70
+ parameters are considered input columns. Input columns can also be set after
71
+ initialization with the `set_input_cols` method.
72
+
79
73
  label_cols: Optional[Union[str, List[str]]]
80
74
  A string or list of strings representing column names that contain labels.
81
- This is a required param for estimators, as there is no way to infer these
82
- columns. If this parameter is not specified, then object is fitted without
83
- labels (like a transformer).
75
+ Label columns must be specified with this parameter during initialization
76
+ or with the `set_label_cols` method before fitting.
84
77
 
85
78
  output_cols: Optional[Union[str, List[str]]]
86
79
  A string or list of strings representing column names that will store the
87
80
  output of predict and transform operations. The length of output_cols must
88
- match the expected number of output columns from the specific estimator or
81
+ match the expected number of output columns from the specific predictor or
89
82
  transformer class used.
90
- If this parameter is not specified, output column names are derived by
91
- adding an OUTPUT_ prefix to the label column names. These inferred output
92
- column names work for estimator's predict() method, but output_cols must
93
- be set explicitly for transformers.
83
+ If you omit this parameter, output column names are derived by adding an
84
+ OUTPUT_ prefix to the label column names for supervised estimators, or
85
+ OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
86
+ work for predictors, but output_cols must be set explicitly for transformers.
87
+ In general, explicitly specifying output column names is clearer, especially
88
+ if you don’t specify the input column names.
89
+ To transform in place, pass the same names for input_cols and output_cols.
90
+ be set explicitly for transformers. Output columns can also be set after
91
+ initialization with the `set_output_cols` method.
94
92
 
95
93
  sample_weight_col: Optional[str]
96
94
  A string representing the column name containing the sample weights.
97
- This argument is only required when working with weighted datasets.
95
+ This argument is only required when working with weighted datasets. Sample
96
+ weight column can also be set after initialization with the
97
+ `set_sample_weight_col` method.
98
98
 
99
99
  passthrough_cols: Optional[Union[str, List[str]]]
100
100
  A string or a list of strings indicating column names to be excluded from any
101
101
  operations (such as train, transform, or inference). These specified column(s)
102
102
  will remain untouched throughout the process. This option is helpful in scenarios
103
103
  requiring automatic input_cols inference, but need to avoid using specific
104
- columns, like index columns, during training or inference.
104
+ columns, like index columns, during training or inference. Passthrough columns
105
+ can also be set after initialization with the `set_passthrough_cols` method.
105
106
 
106
107
  drop_input_cols: Optional[bool], default=False
107
108
  If set, the response of predict(), transform() methods will not contain input columns.
109
+
110
+ score_func: callable, default=f_classif
111
+ Function taking two arrays X and y, and returning a pair of arrays
112
+ (scores, pvalues).
113
+ Default is f_classif (see below "See Also"). The default function only
114
+ works with classification tasks.
115
+
116
+ alpha: float, default=5e-2
117
+ The highest uncorrected p-value for features to keep.
108
118
  """
109
119
 
110
120
  def __init__( # type: ignore[no-untyped-def]
@@ -127,7 +137,7 @@ class SelectFdr(BaseTransformer):
127
137
  self.set_passthrough_cols(passthrough_cols)
128
138
  self.set_drop_input_cols(drop_input_cols)
129
139
  self.set_sample_weight_col(sample_weight_col)
130
- deps = set(SklearnWrapperProvider().dependencies)
140
+ deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])
131
141
 
132
142
  self._deps = list(deps)
133
143
 
@@ -137,13 +147,14 @@ class SelectFdr(BaseTransformer):
137
147
  args=init_args,
138
148
  klass=sklearn.feature_selection.SelectFdr
139
149
  )
140
- self._sklearn_object = sklearn.feature_selection.SelectFdr(
150
+ self._sklearn_object: Any = sklearn.feature_selection.SelectFdr(
141
151
  **cleaned_up_init_args,
142
152
  )
143
153
  self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
144
154
  # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
145
155
  self._snowpark_cols: Optional[List[str]] = self.input_cols
146
- self._handlers: FitPredictHandlers = HandlersImpl(class_name=SelectFdr.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True, wrapper_provider=SklearnWrapperProvider())
156
+ self._handlers: FitPredictHandlers = HandlersImpl(class_name=SelectFdr.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
157
+ self._autogenerated = True
147
158
 
148
159
  def _get_rand_id(self) -> str:
149
160
  """
@@ -199,54 +210,48 @@ class SelectFdr(BaseTransformer):
199
210
  self
200
211
  """
201
212
  self._infer_input_output_cols(dataset)
202
- if isinstance(dataset, pd.DataFrame):
203
- assert self._sklearn_object is not None # keep mypy happy
204
- self._sklearn_object = self._handlers.fit_pandas(
205
- dataset,
206
- self._sklearn_object,
207
- self.input_cols,
208
- self.label_cols,
209
- self.sample_weight_col
210
- )
211
- elif isinstance(dataset, DataFrame):
212
- self._fit_snowpark(dataset)
213
- else:
214
- raise TypeError(
215
- f"Unexpected dataset type: {type(dataset)}."
216
- "Supported dataset types: snowpark.DataFrame, pandas.DataFrame."
217
- )
213
+ if isinstance(dataset, DataFrame):
214
+ session = dataset._session
215
+ assert session is not None # keep mypy happy
216
+ # Validate that key package version in user workspace are supported in snowflake conda channel
217
+ # If customer doesn't have package in conda channel, replace the ones have the closest versions
218
+ self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
219
+ pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
220
+
221
+ # Specify input columns so column pruning will be enforced
222
+ selected_cols = self._get_active_columns()
223
+ if len(selected_cols) > 0:
224
+ dataset = dataset.select(selected_cols)
225
+
226
+ self._snowpark_cols = dataset.select(self.input_cols).columns
227
+
228
+ # If we are already in a stored procedure, no need to kick off another one.
229
+ if SNOWML_SPROC_ENV in os.environ:
230
+ statement_params = telemetry.get_function_usage_statement_params(
231
+ project=_PROJECT,
232
+ subproject=_SUBPROJECT,
233
+ function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), SelectFdr.__class__.__name__),
234
+ api_calls=[Session.call],
235
+ custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
236
+ )
237
+ pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
238
+ pd_df.columns = dataset.columns
239
+ dataset = pd_df
240
+
241
+ model_trainer = ModelTrainerBuilder.build(
242
+ estimator=self._sklearn_object,
243
+ dataset=dataset,
244
+ input_cols=self.input_cols,
245
+ label_cols=self.label_cols,
246
+ sample_weight_col=self.sample_weight_col,
247
+ autogenerated=self._autogenerated,
248
+ subproject=_SUBPROJECT
249
+ )
250
+ self._sklearn_object = model_trainer.train()
218
251
  self._is_fitted = True
219
252
  self._get_model_signatures(dataset)
220
253
  return self
221
254
 
222
- def _fit_snowpark(self, dataset: DataFrame) -> None:
223
- session = dataset._session
224
- assert session is not None # keep mypy happy
225
- # Validate that key package version in user workspace are supported in snowflake conda channel
226
- # If customer doesn't have package in conda channel, replace the ones have the closest versions
227
- self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
228
- pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
229
-
230
- # Specify input columns so column pruning will be enforced
231
- selected_cols = self._get_active_columns()
232
- if len(selected_cols) > 0:
233
- dataset = dataset.select(selected_cols)
234
-
235
- estimator = self._sklearn_object
236
- assert estimator is not None # Keep mypy happy
237
-
238
- self._snowpark_cols = dataset.select(self.input_cols).columns
239
-
240
- self._sklearn_object = self._handlers.fit_snowpark(
241
- dataset,
242
- session,
243
- estimator,
244
- ["snowflake-snowpark-python"] + self._get_dependencies(),
245
- self.input_cols,
246
- self.label_cols,
247
- self.sample_weight_col,
248
- )
249
-
250
255
  def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
251
256
  if self._drop_input_cols:
252
257
  return []
@@ -434,11 +439,6 @@ class SelectFdr(BaseTransformer):
434
439
  subproject=_SUBPROJECT,
435
440
  custom_tags=dict([("autogen", True)]),
436
441
  )
437
- @telemetry.add_stmt_params_to_df(
438
- project=_PROJECT,
439
- subproject=_SUBPROJECT,
440
- custom_tags=dict([("autogen", True)]),
441
- )
442
442
  def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
443
443
  """Method not supported for this class.
444
444
 
@@ -490,11 +490,6 @@ class SelectFdr(BaseTransformer):
490
490
  subproject=_SUBPROJECT,
491
491
  custom_tags=dict([("autogen", True)]),
492
492
  )
493
- @telemetry.add_stmt_params_to_df(
494
- project=_PROJECT,
495
- subproject=_SUBPROJECT,
496
- custom_tags=dict([("autogen", True)]),
497
- )
498
493
  def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
499
494
  """Reduce X to the selected features
500
495
  For more details on this function, see [sklearn.feature_selection.SelectFdr.transform]
@@ -553,7 +548,8 @@ class SelectFdr(BaseTransformer):
553
548
  if False:
554
549
  self.fit(dataset)
555
550
  assert self._sklearn_object is not None
556
- return self._sklearn_object.labels_
551
+ labels : npt.NDArray[Any] = self._sklearn_object.labels_
552
+ return labels
557
553
  else:
558
554
  raise NotImplementedError
559
555
 
@@ -589,6 +585,7 @@ class SelectFdr(BaseTransformer):
589
585
  output_cols = []
590
586
 
591
587
  # Make sure column names are valid snowflake identifiers.
588
+ assert output_cols is not None # Make MyPy happy
592
589
  rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
593
590
 
594
591
  return rv
@@ -599,11 +596,6 @@ class SelectFdr(BaseTransformer):
599
596
  subproject=_SUBPROJECT,
600
597
  custom_tags=dict([("autogen", True)]),
601
598
  )
602
- @telemetry.add_stmt_params_to_df(
603
- project=_PROJECT,
604
- subproject=_SUBPROJECT,
605
- custom_tags=dict([("autogen", True)]),
606
- )
607
599
  def predict_proba(
608
600
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
609
601
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -644,11 +636,6 @@ class SelectFdr(BaseTransformer):
644
636
  subproject=_SUBPROJECT,
645
637
  custom_tags=dict([("autogen", True)]),
646
638
  )
647
- @telemetry.add_stmt_params_to_df(
648
- project=_PROJECT,
649
- subproject=_SUBPROJECT,
650
- custom_tags=dict([("autogen", True)]),
651
- )
652
639
  def predict_log_proba(
653
640
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
654
641
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -685,16 +672,6 @@ class SelectFdr(BaseTransformer):
685
672
  return output_df
686
673
 
687
674
  @available_if(original_estimator_has_callable("decision_function")) # type: ignore[misc]
688
- @telemetry.send_api_usage_telemetry(
689
- project=_PROJECT,
690
- subproject=_SUBPROJECT,
691
- custom_tags=dict([("autogen", True)]),
692
- )
693
- @telemetry.add_stmt_params_to_df(
694
- project=_PROJECT,
695
- subproject=_SUBPROJECT,
696
- custom_tags=dict([("autogen", True)]),
697
- )
698
675
  def decision_function(
699
676
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
700
677
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -793,11 +770,6 @@ class SelectFdr(BaseTransformer):
793
770
  subproject=_SUBPROJECT,
794
771
  custom_tags=dict([("autogen", True)]),
795
772
  )
796
- @telemetry.add_stmt_params_to_df(
797
- project=_PROJECT,
798
- subproject=_SUBPROJECT,
799
- custom_tags=dict([("autogen", True)]),
800
- )
801
773
  def kneighbors(
802
774
  self,
803
775
  dataset: Union[DataFrame, pd.DataFrame],
@@ -857,9 +829,9 @@ class SelectFdr(BaseTransformer):
857
829
  # For classifier, the type of predict is the same as the type of label
858
830
  if self._sklearn_object._estimator_type == 'classifier':
859
831
  # label columns is the desired type for output
860
- outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
832
+ outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
861
833
  # rename the output columns
862
- outputs = model_signature_utils.rename_features(outputs, self.output_cols)
834
+ outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
863
835
  self._model_signature_dict["predict"] = ModelSignature(inputs,
864
836
  ([] if self._drop_input_cols else inputs)
865
837
  + outputs)