snowflake-ml-python 1.1.1__py3-none-any.whl → 1.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (224)
  1. snowflake/cortex/_complete.py +1 -1
  2. snowflake/cortex/_extract_answer.py +1 -1
  3. snowflake/cortex/_sentiment.py +1 -1
  4. snowflake/cortex/_summarize.py +1 -1
  5. snowflake/cortex/_translate.py +1 -1
  6. snowflake/ml/_internal/env_utils.py +68 -6
  7. snowflake/ml/_internal/file_utils.py +34 -4
  8. snowflake/ml/_internal/telemetry.py +79 -91
  9. snowflake/ml/_internal/utils/retryable_http.py +16 -4
  10. snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
  11. snowflake/ml/dataset/dataset.py +1 -1
  12. snowflake/ml/model/_api.py +21 -14
  13. snowflake/ml/model/_client/model/model_impl.py +176 -0
  14. snowflake/ml/model/_client/model/model_method_info.py +19 -0
  15. snowflake/ml/model/_client/model/model_version_impl.py +291 -0
  16. snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
  17. snowflake/ml/model/_client/ops/model_ops.py +308 -0
  18. snowflake/ml/model/_client/sql/model.py +75 -0
  19. snowflake/ml/model/_client/sql/model_version.py +213 -0
  20. snowflake/ml/model/_client/sql/stage.py +40 -0
  21. snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
  22. snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
  23. snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
  24. snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
  25. snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
  26. snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
  27. snowflake/ml/model/_model_composer/model_composer.py +31 -9
  28. snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
  29. snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
  30. snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
  31. snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
  32. snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
  33. snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
  34. snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
  35. snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
  36. snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
  37. snowflake/ml/model/model_signature.py +108 -53
  38. snowflake/ml/model/type_hints.py +1 -0
  39. snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
  40. snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
  41. snowflake/ml/modeling/_internal/model_specifications.py +146 -0
  42. snowflake/ml/modeling/_internal/model_trainer.py +13 -0
  43. snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
  44. snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
  45. snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
  46. snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
  47. snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +96 -124
  48. snowflake/ml/modeling/cluster/affinity_propagation.py +94 -124
  49. snowflake/ml/modeling/cluster/agglomerative_clustering.py +94 -124
  50. snowflake/ml/modeling/cluster/birch.py +94 -124
  51. snowflake/ml/modeling/cluster/bisecting_k_means.py +94 -124
  52. snowflake/ml/modeling/cluster/dbscan.py +94 -124
  53. snowflake/ml/modeling/cluster/feature_agglomeration.py +94 -124
  54. snowflake/ml/modeling/cluster/k_means.py +93 -124
  55. snowflake/ml/modeling/cluster/mean_shift.py +94 -124
  56. snowflake/ml/modeling/cluster/mini_batch_k_means.py +93 -124
  57. snowflake/ml/modeling/cluster/optics.py +94 -124
  58. snowflake/ml/modeling/cluster/spectral_biclustering.py +94 -124
  59. snowflake/ml/modeling/cluster/spectral_clustering.py +94 -124
  60. snowflake/ml/modeling/cluster/spectral_coclustering.py +94 -124
  61. snowflake/ml/modeling/compose/column_transformer.py +94 -124
  62. snowflake/ml/modeling/compose/transformed_target_regressor.py +96 -124
  63. snowflake/ml/modeling/covariance/elliptic_envelope.py +94 -124
  64. snowflake/ml/modeling/covariance/empirical_covariance.py +80 -110
  65. snowflake/ml/modeling/covariance/graphical_lasso.py +94 -124
  66. snowflake/ml/modeling/covariance/graphical_lasso_cv.py +94 -124
  67. snowflake/ml/modeling/covariance/ledoit_wolf.py +85 -115
  68. snowflake/ml/modeling/covariance/min_cov_det.py +94 -124
  69. snowflake/ml/modeling/covariance/oas.py +80 -110
  70. snowflake/ml/modeling/covariance/shrunk_covariance.py +84 -114
  71. snowflake/ml/modeling/decomposition/dictionary_learning.py +94 -124
  72. snowflake/ml/modeling/decomposition/factor_analysis.py +94 -124
  73. snowflake/ml/modeling/decomposition/fast_ica.py +94 -124
  74. snowflake/ml/modeling/decomposition/incremental_pca.py +94 -124
  75. snowflake/ml/modeling/decomposition/kernel_pca.py +94 -124
  76. snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +94 -124
  77. snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +94 -124
  78. snowflake/ml/modeling/decomposition/pca.py +94 -124
  79. snowflake/ml/modeling/decomposition/sparse_pca.py +94 -124
  80. snowflake/ml/modeling/decomposition/truncated_svd.py +94 -124
  81. snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +96 -124
  82. snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +91 -119
  83. snowflake/ml/modeling/ensemble/ada_boost_classifier.py +96 -124
  84. snowflake/ml/modeling/ensemble/ada_boost_regressor.py +96 -124
  85. snowflake/ml/modeling/ensemble/bagging_classifier.py +96 -124
  86. snowflake/ml/modeling/ensemble/bagging_regressor.py +96 -124
  87. snowflake/ml/modeling/ensemble/extra_trees_classifier.py +96 -124
  88. snowflake/ml/modeling/ensemble/extra_trees_regressor.py +96 -124
  89. snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +96 -124
  90. snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +96 -124
  91. snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +96 -124
  92. snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +96 -124
  93. snowflake/ml/modeling/ensemble/isolation_forest.py +94 -124
  94. snowflake/ml/modeling/ensemble/random_forest_classifier.py +96 -124
  95. snowflake/ml/modeling/ensemble/random_forest_regressor.py +96 -124
  96. snowflake/ml/modeling/ensemble/stacking_regressor.py +96 -124
  97. snowflake/ml/modeling/ensemble/voting_classifier.py +96 -124
  98. snowflake/ml/modeling/ensemble/voting_regressor.py +91 -119
  99. snowflake/ml/modeling/feature_selection/generic_univariate_select.py +82 -110
  100. snowflake/ml/modeling/feature_selection/select_fdr.py +80 -108
  101. snowflake/ml/modeling/feature_selection/select_fpr.py +80 -108
  102. snowflake/ml/modeling/feature_selection/select_fwe.py +80 -108
  103. snowflake/ml/modeling/feature_selection/select_k_best.py +81 -109
  104. snowflake/ml/modeling/feature_selection/select_percentile.py +80 -108
  105. snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +94 -124
  106. snowflake/ml/modeling/feature_selection/variance_threshold.py +76 -106
  107. snowflake/ml/modeling/framework/base.py +2 -2
  108. snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +96 -124
  109. snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +96 -124
  110. snowflake/ml/modeling/impute/iterative_imputer.py +94 -124
  111. snowflake/ml/modeling/impute/knn_imputer.py +94 -124
  112. snowflake/ml/modeling/impute/missing_indicator.py +94 -124
  113. snowflake/ml/modeling/impute/simple_imputer.py +1 -1
  114. snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +77 -107
  115. snowflake/ml/modeling/kernel_approximation/nystroem.py +94 -124
  116. snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +94 -124
  117. snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +86 -116
  118. snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +84 -114
  119. snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +96 -124
  120. snowflake/ml/modeling/lightgbm/lgbm_classifier.py +71 -100
  121. snowflake/ml/modeling/lightgbm/lgbm_regressor.py +71 -100
  122. snowflake/ml/modeling/linear_model/ard_regression.py +96 -124
  123. snowflake/ml/modeling/linear_model/bayesian_ridge.py +96 -124
  124. snowflake/ml/modeling/linear_model/elastic_net.py +96 -124
  125. snowflake/ml/modeling/linear_model/elastic_net_cv.py +96 -124
  126. snowflake/ml/modeling/linear_model/gamma_regressor.py +96 -124
  127. snowflake/ml/modeling/linear_model/huber_regressor.py +96 -124
  128. snowflake/ml/modeling/linear_model/lars.py +96 -124
  129. snowflake/ml/modeling/linear_model/lars_cv.py +96 -124
  130. snowflake/ml/modeling/linear_model/lasso.py +96 -124
  131. snowflake/ml/modeling/linear_model/lasso_cv.py +96 -124
  132. snowflake/ml/modeling/linear_model/lasso_lars.py +96 -124
  133. snowflake/ml/modeling/linear_model/lasso_lars_cv.py +96 -124
  134. snowflake/ml/modeling/linear_model/lasso_lars_ic.py +96 -124
  135. snowflake/ml/modeling/linear_model/linear_regression.py +91 -119
  136. snowflake/ml/modeling/linear_model/logistic_regression.py +96 -124
  137. snowflake/ml/modeling/linear_model/logistic_regression_cv.py +96 -124
  138. snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +96 -124
  139. snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +96 -124
  140. snowflake/ml/modeling/linear_model/multi_task_lasso.py +96 -124
  141. snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +96 -124
  142. snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +96 -124
  143. snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +96 -124
  144. snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +95 -124
  145. snowflake/ml/modeling/linear_model/perceptron.py +95 -124
  146. snowflake/ml/modeling/linear_model/poisson_regressor.py +96 -124
  147. snowflake/ml/modeling/linear_model/ransac_regressor.py +96 -124
  148. snowflake/ml/modeling/linear_model/ridge.py +96 -124
  149. snowflake/ml/modeling/linear_model/ridge_classifier.py +96 -124
  150. snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +96 -124
  151. snowflake/ml/modeling/linear_model/ridge_cv.py +96 -124
  152. snowflake/ml/modeling/linear_model/sgd_classifier.py +96 -124
  153. snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +94 -124
  154. snowflake/ml/modeling/linear_model/sgd_regressor.py +96 -124
  155. snowflake/ml/modeling/linear_model/theil_sen_regressor.py +96 -124
  156. snowflake/ml/modeling/linear_model/tweedie_regressor.py +96 -124
  157. snowflake/ml/modeling/manifold/isomap.py +94 -124
  158. snowflake/ml/modeling/manifold/mds.py +94 -124
  159. snowflake/ml/modeling/manifold/spectral_embedding.py +94 -124
  160. snowflake/ml/modeling/manifold/tsne.py +94 -124
  161. snowflake/ml/modeling/metrics/classification.py +187 -52
  162. snowflake/ml/modeling/metrics/correlation.py +4 -2
  163. snowflake/ml/modeling/metrics/covariance.py +7 -4
  164. snowflake/ml/modeling/metrics/ranking.py +32 -16
  165. snowflake/ml/modeling/metrics/regression.py +60 -32
  166. snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +94 -124
  167. snowflake/ml/modeling/mixture/gaussian_mixture.py +94 -124
  168. snowflake/ml/modeling/model_selection/grid_search_cv.py +88 -138
  169. snowflake/ml/modeling/model_selection/randomized_search_cv.py +90 -144
  170. snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +86 -114
  171. snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +93 -121
  172. snowflake/ml/modeling/multiclass/output_code_classifier.py +94 -122
  173. snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +92 -120
  174. snowflake/ml/modeling/naive_bayes/categorical_nb.py +96 -124
  175. snowflake/ml/modeling/naive_bayes/complement_nb.py +92 -120
  176. snowflake/ml/modeling/naive_bayes/gaussian_nb.py +79 -107
  177. snowflake/ml/modeling/naive_bayes/multinomial_nb.py +88 -116
  178. snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +96 -124
  179. snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +96 -124
  180. snowflake/ml/modeling/neighbors/kernel_density.py +94 -124
  181. snowflake/ml/modeling/neighbors/local_outlier_factor.py +94 -124
  182. snowflake/ml/modeling/neighbors/nearest_centroid.py +89 -117
  183. snowflake/ml/modeling/neighbors/nearest_neighbors.py +94 -124
  184. snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +96 -124
  185. snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +96 -124
  186. snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +96 -124
  187. snowflake/ml/modeling/neural_network/bernoulli_rbm.py +94 -124
  188. snowflake/ml/modeling/neural_network/mlp_classifier.py +96 -124
  189. snowflake/ml/modeling/neural_network/mlp_regressor.py +96 -124
  190. snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
  191. snowflake/ml/modeling/preprocessing/binarizer.py +14 -9
  192. snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +0 -4
  193. snowflake/ml/modeling/preprocessing/label_encoder.py +21 -13
  194. snowflake/ml/modeling/preprocessing/max_abs_scaler.py +20 -14
  195. snowflake/ml/modeling/preprocessing/min_max_scaler.py +35 -19
  196. snowflake/ml/modeling/preprocessing/normalizer.py +6 -9
  197. snowflake/ml/modeling/preprocessing/one_hot_encoder.py +20 -13
  198. snowflake/ml/modeling/preprocessing/ordinal_encoder.py +25 -13
  199. snowflake/ml/modeling/preprocessing/polynomial_features.py +94 -124
  200. snowflake/ml/modeling/preprocessing/robust_scaler.py +28 -14
  201. snowflake/ml/modeling/preprocessing/standard_scaler.py +25 -13
  202. snowflake/ml/modeling/semi_supervised/label_propagation.py +96 -124
  203. snowflake/ml/modeling/semi_supervised/label_spreading.py +96 -124
  204. snowflake/ml/modeling/svm/linear_svc.py +96 -124
  205. snowflake/ml/modeling/svm/linear_svr.py +96 -124
  206. snowflake/ml/modeling/svm/nu_svc.py +96 -124
  207. snowflake/ml/modeling/svm/nu_svr.py +96 -124
  208. snowflake/ml/modeling/svm/svc.py +96 -124
  209. snowflake/ml/modeling/svm/svr.py +96 -124
  210. snowflake/ml/modeling/tree/decision_tree_classifier.py +96 -124
  211. snowflake/ml/modeling/tree/decision_tree_regressor.py +96 -124
  212. snowflake/ml/modeling/tree/extra_tree_classifier.py +96 -124
  213. snowflake/ml/modeling/tree/extra_tree_regressor.py +96 -124
  214. snowflake/ml/modeling/xgboost/xgb_classifier.py +96 -125
  215. snowflake/ml/modeling/xgboost/xgb_regressor.py +96 -125
  216. snowflake/ml/modeling/xgboost/xgbrf_classifier.py +96 -125
  217. snowflake/ml/modeling/xgboost/xgbrf_regressor.py +96 -125
  218. snowflake/ml/registry/model_registry.py +2 -0
  219. snowflake/ml/registry/registry.py +215 -0
  220. snowflake/ml/version.py +1 -1
  221. {snowflake_ml_python-1.1.1.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +21 -3
  222. snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
  223. snowflake_ml_python-1.1.1.dist-info/RECORD +0 -331
  224. {snowflake_ml_python-1.1.1.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
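Beyond the per-estimator churn below, the most visible additions in this list are a new public registry entry point (snowflake/ml/registry/registry.py, +215 lines) and the snowflake/ml/model/_client/* modules that back it. As a rough, hedged sketch of how such an entry point is typically driven; the Registry class name and the log_model call below are assumptions inferred from the file names, not confirmed by this diff:

    # Hypothetical sketch only: class and method names are assumptions, not confirmed by this diff.
    from snowflake.snowpark import Session
    from snowflake.ml.registry import registry  # new top-level module in 1.1.2 per the file list

    connection_parameters = {"account": "<account>", "user": "<user>", "password": "<password>"}
    session = Session.builder.configs(connection_parameters).create()

    model = ...  # placeholder: a fitted model object supported by the registry
    reg = registry.Registry(session=session)  # assumed constructor
    reg.log_model(model, model_name="MY_MODEL", version_name="V1")  # assumed signature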
snowflake/ml/modeling/covariance/min_cov_det.py
@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
  from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
  from snowflake.ml._internal import telemetry
  from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+ from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
  from snowflake.ml._internal.utils import pkg_version_utils, identifier
- from snowflake.snowpark import DataFrame
+ from snowflake.snowpark import DataFrame, Session
  from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
  from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+ from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+ from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
  from snowflake.ml.modeling._internal.estimator_utils import (
  gather_dependencies,
  original_estimator_has_callable,
  transform_snowml_obj_to_sklearn_obj,
  validate_sklearn_args,
  )
- from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
  from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers

  from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
  _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.covariance".replace("sklearn.", "").split("_")])


-
  class MinCovDet(BaseTransformer):
  r"""Minimum Covariance Determinant (MCD): robust estimator of covariance
  For more details on this class, see [sklearn.covariance.MinCovDet]
@@ -60,6 +61,49 @@ class MinCovDet(BaseTransformer):

  Parameters
  ----------
+
+ input_cols: Optional[Union[str, List[str]]]
+ A string or list of strings representing column names that contain features.
+ If this parameter is not specified, all columns in the input DataFrame except
+ the columns specified by label_cols, sample_weight_col, and passthrough_cols
+ parameters are considered input columns. Input columns can also be set after
+ initialization with the `set_input_cols` method.
+
+ label_cols: Optional[Union[str, List[str]]]
+ This parameter is optional and will be ignored during fit. It is present here for API consistency by convention.
+
+ output_cols: Optional[Union[str, List[str]]]
+ A string or list of strings representing column names that will store the
+ output of predict and transform operations. The length of output_cols must
+ match the expected number of output columns from the specific predictor or
+ transformer class used.
+ If you omit this parameter, output column names are derived by adding an
+ OUTPUT_ prefix to the label column names for supervised estimators, or
+ OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
+ work for predictors, but output_cols must be set explicitly for transformers.
+ In general, explicitly specifying output column names is clearer, especially
+ if you don’t specify the input column names.
+ To transform in place, pass the same names for input_cols and output_cols.
+ be set explicitly for transformers. Output columns can also be set after
+ initialization with the `set_output_cols` method.
+
+ sample_weight_col: Optional[str]
+ A string representing the column name containing the sample weights.
+ This argument is only required when working with weighted datasets. Sample
+ weight column can also be set after initialization with the
+ `set_sample_weight_col` method.
+
+ passthrough_cols: Optional[Union[str, List[str]]]
+ A string or a list of strings indicating column names to be excluded from any
+ operations (such as train, transform, or inference). These specified column(s)
+ will remain untouched throughout the process. This option is helpful in scenarios
+ requiring automatic input_cols inference, but need to avoid using specific
+ columns, like index columns, during training or inference. Passthrough columns
+ can also be set after initialization with the `set_passthrough_cols` method.
+
+ drop_input_cols: Optional[bool], default=False
+ If set, the response of predict(), transform() methods will not contain input columns.
+
  store_precision: bool, default=True
  Specify if the estimated precision is stored.

@@ -83,42 +127,6 @@ class MinCovDet(BaseTransformer):
  Determines the pseudo random number generator for shuffling the data.
  Pass an int for reproducible results across multiple function calls.
  See :term:`Glossary <random_state>`.
-
- input_cols: Optional[Union[str, List[str]]]
- A string or list of strings representing column names that contain features.
- If this parameter is not specified, all columns in the input DataFrame except
- the columns specified by label_cols, sample_weight_col, and passthrough_cols
- parameters are considered input columns.
-
- label_cols: Optional[Union[str, List[str]]]
- A string or list of strings representing column names that contain labels.
- This is a required param for estimators, as there is no way to infer these
- columns. If this parameter is not specified, then object is fitted without
- labels (like a transformer).
-
- output_cols: Optional[Union[str, List[str]]]
- A string or list of strings representing column names that will store the
- output of predict and transform operations. The length of output_cols must
- match the expected number of output columns from the specific estimator or
- transformer class used.
- If this parameter is not specified, output column names are derived by
- adding an OUTPUT_ prefix to the label column names. These inferred output
- column names work for estimator's predict() method, but output_cols must
- be set explicitly for transformers.
-
- sample_weight_col: Optional[str]
- A string representing the column name containing the sample weights.
- This argument is only required when working with weighted datasets.
-
- passthrough_cols: Optional[Union[str, List[str]]]
- A string or a list of strings indicating column names to be excluded from any
- operations (such as train, transform, or inference). These specified column(s)
- will remain untouched throughout the process. This option is helpful in scenarios
- requiring automatic input_cols inference, but need to avoid using specific
- columns, like index columns, during training or inference.
-
- drop_input_cols: Optional[bool], default=False
- If set, the response of predict(), transform() methods will not contain input columns.
  """

  def __init__( # type: ignore[no-untyped-def]
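The column-handling parameters were moved to the top of the Parameters section and expanded (see the added block two hunks above). A minimal usage sketch tying them together; the import path mirrors the file list, while the column names and data are illustrative:

    # Illustrative only: column names and the toy DataFrame are made up for this sketch.
    import pandas as pd
    from snowflake.ml.modeling.covariance.min_cov_det import MinCovDet

    df = pd.DataFrame({
        "F1": [1.0, 2.0, 3.0, 4.0, 5.0],
        "F2": [2.0, 1.0, 4.0, 3.0, 5.0],
        "ROW_ID": [1, 2, 3, 4, 5],
    })

    mcd = MinCovDet(
        input_cols=["F1", "F2"],      # feature columns; inferred from the DataFrame if omitted
        passthrough_cols=["ROW_ID"],  # excluded from training/inference but kept untouched
        drop_input_cols=False,        # keep input columns in the predict()/transform() response
    )
    mcd.fit(df)  # a pandas input trains locally; a Snowpark DataFrame trains in Snowflake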
@@ -143,7 +151,7 @@ class MinCovDet(BaseTransformer):
  self.set_passthrough_cols(passthrough_cols)
  self.set_drop_input_cols(drop_input_cols)
  self.set_sample_weight_col(sample_weight_col)
- deps = set(SklearnWrapperProvider().dependencies)
+ deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])

  self._deps = list(deps)
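Dependency pins are now built inline from the locally installed package versions instead of being supplied by SklearnWrapperProvider. A standalone sketch of the same pinning pattern:

    # The estimator's dependency set as exact requirement strings, pinned to whatever
    # versions of numpy, scikit-learn, and cloudpickle are installed locally.
    import cloudpickle as cp
    import numpy as np
    import sklearn

    deps = {
        f"numpy=={np.__version__}",
        f"scikit-learn=={sklearn.__version__}",
        f"cloudpickle=={cp.__version__}",
    }
    print(sorted(deps))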
@@ -155,13 +163,14 @@ class MinCovDet(BaseTransformer):
  args=init_args,
  klass=sklearn.covariance.MinCovDet
  )
- self._sklearn_object = sklearn.covariance.MinCovDet(
+ self._sklearn_object: Any = sklearn.covariance.MinCovDet(
  **cleaned_up_init_args,
  )
  self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
  # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
  self._snowpark_cols: Optional[List[str]] = self.input_cols
- self._handlers: FitPredictHandlers = HandlersImpl(class_name=MinCovDet.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True, wrapper_provider=SklearnWrapperProvider())
+ self._handlers: FitPredictHandlers = HandlersImpl(class_name=MinCovDet.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+ self._autogenerated = True

  def _get_rand_id(self) -> str:
  """
@@ -217,54 +226,48 @@ class MinCovDet(BaseTransformer):
  self
  """
  self._infer_input_output_cols(dataset)
- if isinstance(dataset, pd.DataFrame):
- assert self._sklearn_object is not None # keep mypy happy
- self._sklearn_object = self._handlers.fit_pandas(
- dataset,
- self._sklearn_object,
- self.input_cols,
- self.label_cols,
- self.sample_weight_col
- )
- elif isinstance(dataset, DataFrame):
- self._fit_snowpark(dataset)
- else:
- raise TypeError(
- f"Unexpected dataset type: {type(dataset)}."
- "Supported dataset types: snowpark.DataFrame, pandas.DataFrame."
- )
+ if isinstance(dataset, DataFrame):
+ session = dataset._session
+ assert session is not None # keep mypy happy
+ # Validate that key package version in user workspace are supported in snowflake conda channel
+ # If customer doesn't have package in conda channel, replace the ones have the closest versions
+ self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+ pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+ # Specify input columns so column pruning will be enforced
+ selected_cols = self._get_active_columns()
+ if len(selected_cols) > 0:
+ dataset = dataset.select(selected_cols)
+
+ self._snowpark_cols = dataset.select(self.input_cols).columns
+
+ # If we are already in a stored procedure, no need to kick off another one.
+ if SNOWML_SPROC_ENV in os.environ:
+ statement_params = telemetry.get_function_usage_statement_params(
+ project=_PROJECT,
+ subproject=_SUBPROJECT,
+ function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), MinCovDet.__class__.__name__),
+ api_calls=[Session.call],
+ custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+ )
+ pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+ pd_df.columns = dataset.columns
+ dataset = pd_df
+
+ model_trainer = ModelTrainerBuilder.build(
+ estimator=self._sklearn_object,
+ dataset=dataset,
+ input_cols=self.input_cols,
+ label_cols=self.label_cols,
+ sample_weight_col=self.sample_weight_col,
+ autogenerated=self._autogenerated,
+ subproject=_SUBPROJECT
+ )
+ self._sklearn_object = model_trainer.train()
  self._is_fitted = True
  self._get_model_signatures(dataset)
  return self

- def _fit_snowpark(self, dataset: DataFrame) -> None:
- session = dataset._session
- assert session is not None # keep mypy happy
- # Validate that key package version in user workspace are supported in snowflake conda channel
- # If customer doesn't have package in conda channel, replace the ones have the closest versions
- self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
- pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
- # Specify input columns so column pruning will be enforced
- selected_cols = self._get_active_columns()
- if len(selected_cols) > 0:
- dataset = dataset.select(selected_cols)
-
- estimator = self._sklearn_object
- assert estimator is not None # Keep mypy happy
-
- self._snowpark_cols = dataset.select(self.input_cols).columns
-
- self._sklearn_object = self._handlers.fit_snowpark(
- dataset,
- session,
- estimator,
- ["snowflake-snowpark-python"] + self._get_dependencies(),
- self.input_cols,
- self.label_cols,
- self.sample_weight_col,
- )
-
  def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
  if self._drop_input_cols:
  return []
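The rewritten fit() above validates package versions against the Snowflake conda channel, prunes to the active columns, falls back to a local pandas fit when it is already running inside a stored procedure (SNOWML_SPROC_ENV is set), and otherwise delegates training to ModelTrainerBuilder. A simplified sketch of that dispatch decision; the trainer names are assumptions based on the new pandas_trainer.py and snowpark_trainer.py files, and the environment variable value is a placeholder:

    # Not the real ModelTrainerBuilder: just the shape of the decision the new fit() relies on.
    import os

    from snowflake.snowpark import DataFrame as SnowparkDataFrame

    SNOWML_SPROC_ENV = "IN_SNOWML_SPROC"  # placeholder; the real value lives in env_utils

    def choose_trainer(dataset) -> str:
        """Return the trainer a builder like this would be expected to pick for `dataset`."""
        if isinstance(dataset, SnowparkDataFrame) and SNOWML_SPROC_ENV not in os.environ:
            return "SnowparkModelTrainer"  # push training into a Snowflake stored procedure
        # Already inside a stored procedure (data converted to pandas), or given pandas input.
        return "PandasModelTrainer"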
@@ -452,11 +455,6 @@ class MinCovDet(BaseTransformer):
  subproject=_SUBPROJECT,
  custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
- project=_PROJECT,
- subproject=_SUBPROJECT,
- custom_tags=dict([("autogen", True)]),
- )
  def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
  """Method not supported for this class.

@@ -508,11 +506,6 @@ class MinCovDet(BaseTransformer):
  subproject=_SUBPROJECT,
  custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
- project=_PROJECT,
- subproject=_SUBPROJECT,
- custom_tags=dict([("autogen", True)]),
- )
  def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
  """Method not supported for this class.

@@ -569,7 +562,8 @@ class MinCovDet(BaseTransformer):
  if False:
  self.fit(dataset)
  assert self._sklearn_object is not None
- return self._sklearn_object.labels_
+ labels : npt.NDArray[Any] = self._sklearn_object.labels_
+ return labels
  else:
  raise NotImplementedError

@@ -605,6 +599,7 @@ class MinCovDet(BaseTransformer):
  output_cols = []

  # Make sure column names are valid snowflake identifiers.
+ assert output_cols is not None # Make MyPy happy
  rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]

  return rv
@@ -615,11 +610,6 @@ class MinCovDet(BaseTransformer):
  subproject=_SUBPROJECT,
  custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
- project=_PROJECT,
- subproject=_SUBPROJECT,
- custom_tags=dict([("autogen", True)]),
- )
  def predict_proba(
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -660,11 +650,6 @@ class MinCovDet(BaseTransformer):
  subproject=_SUBPROJECT,
  custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
- project=_PROJECT,
- subproject=_SUBPROJECT,
- custom_tags=dict([("autogen", True)]),
- )
  def predict_log_proba(
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -701,16 +686,6 @@ class MinCovDet(BaseTransformer):
  return output_df

  @available_if(original_estimator_has_callable("decision_function")) # type: ignore[misc]
- @telemetry.send_api_usage_telemetry(
- project=_PROJECT,
- subproject=_SUBPROJECT,
- custom_tags=dict([("autogen", True)]),
- )
- @telemetry.add_stmt_params_to_df(
- project=_PROJECT,
- subproject=_SUBPROJECT,
- custom_tags=dict([("autogen", True)]),
- )
  def decision_function(
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -811,11 +786,6 @@ class MinCovDet(BaseTransformer):
  subproject=_SUBPROJECT,
  custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
- project=_PROJECT,
- subproject=_SUBPROJECT,
- custom_tags=dict([("autogen", True)]),
- )
  def kneighbors(
  self,
  dataset: Union[DataFrame, pd.DataFrame],
@@ -875,9 +845,9 @@ class MinCovDet(BaseTransformer):
  # For classifier, the type of predict is the same as the type of label
  if self._sklearn_object._estimator_type == 'classifier':
  # label columns is the desired type for output
- outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+ outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
  # rename the output columns
- outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+ outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
  self._model_signature_dict["predict"] = ModelSignature(inputs,
  ([] if self._drop_input_cols else inputs)
  + outputs)
snowflake/ml/modeling/covariance/oas.py
@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
  from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
  from snowflake.ml._internal import telemetry
  from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+ from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
  from snowflake.ml._internal.utils import pkg_version_utils, identifier
- from snowflake.snowpark import DataFrame
+ from snowflake.snowpark import DataFrame, Session
  from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
  from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+ from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+ from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
  from snowflake.ml.modeling._internal.estimator_utils import (
  gather_dependencies,
  original_estimator_has_callable,
  transform_snowml_obj_to_sklearn_obj,
  validate_sklearn_args,
  )
- from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
  from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers

  from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
  _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.covariance".replace("sklearn.", "").split("_")])


-
  class OAS(BaseTransformer):
  r"""Oracle Approximating Shrinkage Estimator as proposed in [1]_
  For more details on this class, see [sklearn.covariance.OAS]
@@ -60,50 +61,57 @@ class OAS(BaseTransformer):

  Parameters
  ----------
- store_precision: bool, default=True
- Specify if the estimated precision is stored.
-
- assume_centered: bool, default=False
- If True, data will not be centered before computation.
- Useful when working with data whose mean is almost, but not exactly
- zero.
- If False (default), data will be centered before computation.

  input_cols: Optional[Union[str, List[str]]]
  A string or list of strings representing column names that contain features.
  If this parameter is not specified, all columns in the input DataFrame except
  the columns specified by label_cols, sample_weight_col, and passthrough_cols
- parameters are considered input columns.
-
+ parameters are considered input columns. Input columns can also be set after
+ initialization with the `set_input_cols` method.
+
  label_cols: Optional[Union[str, List[str]]]
- A string or list of strings representing column names that contain labels.
- This is a required param for estimators, as there is no way to infer these
- columns. If this parameter is not specified, then object is fitted without
- labels (like a transformer).
-
+ This parameter is optional and will be ignored during fit. It is present here for API consistency by convention.
+
  output_cols: Optional[Union[str, List[str]]]
  A string or list of strings representing column names that will store the
  output of predict and transform operations. The length of output_cols must
- match the expected number of output columns from the specific estimator or
+ match the expected number of output columns from the specific predictor or
  transformer class used.
- If this parameter is not specified, output column names are derived by
- adding an OUTPUT_ prefix to the label column names. These inferred output
- column names work for estimator's predict() method, but output_cols must
- be set explicitly for transformers.
+ If you omit this parameter, output column names are derived by adding an
+ OUTPUT_ prefix to the label column names for supervised estimators, or
+ OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
+ work for predictors, but output_cols must be set explicitly for transformers.
+ In general, explicitly specifying output column names is clearer, especially
+ if you don’t specify the input column names.
+ To transform in place, pass the same names for input_cols and output_cols.
+ be set explicitly for transformers. Output columns can also be set after
+ initialization with the `set_output_cols` method.

  sample_weight_col: Optional[str]
  A string representing the column name containing the sample weights.
- This argument is only required when working with weighted datasets.
+ This argument is only required when working with weighted datasets. Sample
+ weight column can also be set after initialization with the
+ `set_sample_weight_col` method.

  passthrough_cols: Optional[Union[str, List[str]]]
  A string or a list of strings indicating column names to be excluded from any
  operations (such as train, transform, or inference). These specified column(s)
  will remain untouched throughout the process. This option is helpful in scenarios
  requiring automatic input_cols inference, but need to avoid using specific
- columns, like index columns, during training or inference.
+ columns, like index columns, during training or inference. Passthrough columns
+ can also be set after initialization with the `set_passthrough_cols` method.

  drop_input_cols: Optional[bool], default=False
  If set, the response of predict(), transform() methods will not contain input columns.
+
+ store_precision: bool, default=True
+ Specify if the estimated precision is stored.
+
+ assume_centered: bool, default=False
+ If True, data will not be centered before computation.
+ Useful when working with data whose mean is almost, but not exactly
+ zero.
+ If False (default), data will be centered before computation.
  """

  def __init__( # type: ignore[no-untyped-def]
@@ -126,7 +134,7 @@ class OAS(BaseTransformer):
  self.set_passthrough_cols(passthrough_cols)
  self.set_drop_input_cols(drop_input_cols)
  self.set_sample_weight_col(sample_weight_col)
- deps = set(SklearnWrapperProvider().dependencies)
+ deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])

  self._deps = list(deps)

@@ -136,13 +144,14 @@ class OAS(BaseTransformer):
  args=init_args,
  klass=sklearn.covariance.OAS
  )
- self._sklearn_object = sklearn.covariance.OAS(
+ self._sklearn_object: Any = sklearn.covariance.OAS(
  **cleaned_up_init_args,
  )
  self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
  # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
  self._snowpark_cols: Optional[List[str]] = self.input_cols
- self._handlers: FitPredictHandlers = HandlersImpl(class_name=OAS.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True, wrapper_provider=SklearnWrapperProvider())
+ self._handlers: FitPredictHandlers = HandlersImpl(class_name=OAS.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+ self._autogenerated = True

  def _get_rand_id(self) -> str:
  """
@@ -198,54 +207,48 @@ class OAS(BaseTransformer):
  self
  """
  self._infer_input_output_cols(dataset)
- if isinstance(dataset, pd.DataFrame):
- assert self._sklearn_object is not None # keep mypy happy
- self._sklearn_object = self._handlers.fit_pandas(
- dataset,
- self._sklearn_object,
- self.input_cols,
- self.label_cols,
- self.sample_weight_col
- )
- elif isinstance(dataset, DataFrame):
- self._fit_snowpark(dataset)
- else:
- raise TypeError(
- f"Unexpected dataset type: {type(dataset)}."
- "Supported dataset types: snowpark.DataFrame, pandas.DataFrame."
- )
+ if isinstance(dataset, DataFrame):
+ session = dataset._session
+ assert session is not None # keep mypy happy
+ # Validate that key package version in user workspace are supported in snowflake conda channel
+ # If customer doesn't have package in conda channel, replace the ones have the closest versions
+ self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+ pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+ # Specify input columns so column pruning will be enforced
+ selected_cols = self._get_active_columns()
+ if len(selected_cols) > 0:
+ dataset = dataset.select(selected_cols)
+
+ self._snowpark_cols = dataset.select(self.input_cols).columns
+
+ # If we are already in a stored procedure, no need to kick off another one.
+ if SNOWML_SPROC_ENV in os.environ:
+ statement_params = telemetry.get_function_usage_statement_params(
+ project=_PROJECT,
+ subproject=_SUBPROJECT,
+ function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), OAS.__class__.__name__),
+ api_calls=[Session.call],
+ custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+ )
+ pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+ pd_df.columns = dataset.columns
+ dataset = pd_df
+
+ model_trainer = ModelTrainerBuilder.build(
+ estimator=self._sklearn_object,
+ dataset=dataset,
+ input_cols=self.input_cols,
+ label_cols=self.label_cols,
+ sample_weight_col=self.sample_weight_col,
+ autogenerated=self._autogenerated,
+ subproject=_SUBPROJECT
+ )
+ self._sklearn_object = model_trainer.train()
  self._is_fitted = True
  self._get_model_signatures(dataset)
  return self

- def _fit_snowpark(self, dataset: DataFrame) -> None:
- session = dataset._session
- assert session is not None # keep mypy happy
- # Validate that key package version in user workspace are supported in snowflake conda channel
- # If customer doesn't have package in conda channel, replace the ones have the closest versions
- self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
- pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
- # Specify input columns so column pruning will be enforced
- selected_cols = self._get_active_columns()
- if len(selected_cols) > 0:
- dataset = dataset.select(selected_cols)
-
- estimator = self._sklearn_object
- assert estimator is not None # Keep mypy happy
-
- self._snowpark_cols = dataset.select(self.input_cols).columns
-
- self._sklearn_object = self._handlers.fit_snowpark(
- dataset,
- session,
- estimator,
- ["snowflake-snowpark-python"] + self._get_dependencies(),
- self.input_cols,
- self.label_cols,
- self.sample_weight_col,
- )
-
  def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
  if self._drop_input_cols:
  return []
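OAS picks up the same fit() rewrite as MinCovDet. A hedged usage sketch of the Snowpark branch; the import path mirrors the file list, and the connection parameters, table, and column names are placeholders:

    # Illustrative only: connection parameters, table, and column names are placeholders.
    from snowflake.snowpark import Session
    from snowflake.ml.modeling.covariance.oas import OAS

    connection_parameters = {"account": "<account>", "user": "<user>", "password": "<password>"}
    session = Session.builder.configs(connection_parameters).create()
    features = session.table("MY_DB.MY_SCHEMA.FEATURES")  # numeric columns F1, F2

    oas = OAS(input_cols=["F1", "F2"], assume_centered=False)
    # Outside a stored procedure this validates dependency versions, prunes to the selected
    # columns, and trains via the Snowpark trainer; inside one (SNOWML_SPROC_ENV set) the
    # data is converted to pandas and fitted locally.
    oas.fit(features)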
@@ -433,11 +436,6 @@ class OAS(BaseTransformer):
  subproject=_SUBPROJECT,
  custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
- project=_PROJECT,
- subproject=_SUBPROJECT,
- custom_tags=dict([("autogen", True)]),
- )
  def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
  """Method not supported for this class.

@@ -489,11 +487,6 @@ class OAS(BaseTransformer):
  subproject=_SUBPROJECT,
  custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
- project=_PROJECT,
- subproject=_SUBPROJECT,
- custom_tags=dict([("autogen", True)]),
- )
  def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
  """Method not supported for this class.

@@ -550,7 +543,8 @@ class OAS(BaseTransformer):
  if False:
  self.fit(dataset)
  assert self._sklearn_object is not None
- return self._sklearn_object.labels_
+ labels : npt.NDArray[Any] = self._sklearn_object.labels_
+ return labels
  else:
  raise NotImplementedError

@@ -586,6 +580,7 @@ class OAS(BaseTransformer):
  output_cols = []

  # Make sure column names are valid snowflake identifiers.
+ assert output_cols is not None # Make MyPy happy
  rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]

  return rv
@@ -596,11 +591,6 @@ class OAS(BaseTransformer):
  subproject=_SUBPROJECT,
  custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
- project=_PROJECT,
- subproject=_SUBPROJECT,
- custom_tags=dict([("autogen", True)]),
- )
  def predict_proba(
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -641,11 +631,6 @@ class OAS(BaseTransformer):
  subproject=_SUBPROJECT,
  custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
- project=_PROJECT,
- subproject=_SUBPROJECT,
- custom_tags=dict([("autogen", True)]),
- )
  def predict_log_proba(
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -682,16 +667,6 @@ class OAS(BaseTransformer):
  return output_df

  @available_if(original_estimator_has_callable("decision_function")) # type: ignore[misc]
- @telemetry.send_api_usage_telemetry(
- project=_PROJECT,
- subproject=_SUBPROJECT,
- custom_tags=dict([("autogen", True)]),
- )
- @telemetry.add_stmt_params_to_df(
- project=_PROJECT,
- subproject=_SUBPROJECT,
- custom_tags=dict([("autogen", True)]),
- )
  def decision_function(
  self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
  ) -> Union[DataFrame, pd.DataFrame]:
@@ -792,11 +767,6 @@ class OAS(BaseTransformer):
  subproject=_SUBPROJECT,
  custom_tags=dict([("autogen", True)]),
  )
- @telemetry.add_stmt_params_to_df(
- project=_PROJECT,
- subproject=_SUBPROJECT,
- custom_tags=dict([("autogen", True)]),
- )
  def kneighbors(
  self,
  dataset: Union[DataFrame, pd.DataFrame],
@@ -856,9 +826,9 @@ class OAS(BaseTransformer):
  # For classifier, the type of predict is the same as the type of label
  if self._sklearn_object._estimator_type == 'classifier':
  # label columns is the desired type for output
- outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+ outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
  # rename the output columns
- outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+ outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
  self._model_signature_dict["predict"] = ModelSignature(inputs,
  ([] if self._drop_input_cols else inputs)
  + outputs)