snowflake-ml-python 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- snowflake/cortex/_complete.py +1 -1
- snowflake/cortex/_extract_answer.py +1 -1
- snowflake/cortex/_sentiment.py +1 -1
- snowflake/cortex/_summarize.py +1 -1
- snowflake/cortex/_translate.py +1 -1
- snowflake/ml/_internal/env_utils.py +68 -6
- snowflake/ml/_internal/file_utils.py +34 -4
- snowflake/ml/_internal/telemetry.py +79 -91
- snowflake/ml/_internal/utils/identifier.py +78 -72
- snowflake/ml/_internal/utils/retryable_http.py +16 -4
- snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
- snowflake/ml/dataset/dataset.py +1 -1
- snowflake/ml/model/_api.py +21 -14
- snowflake/ml/model/_client/model/model_impl.py +176 -0
- snowflake/ml/model/_client/model/model_method_info.py +19 -0
- snowflake/ml/model/_client/model/model_version_impl.py +291 -0
- snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
- snowflake/ml/model/_client/ops/model_ops.py +308 -0
- snowflake/ml/model/_client/sql/model.py +75 -0
- snowflake/ml/model/_client/sql/model_version.py +213 -0
- snowflake/ml/model/_client/sql/stage.py +40 -0
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
- snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
- snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
- snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
- snowflake/ml/model/_model_composer/model_composer.py +31 -9
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
- snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
- snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
- snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
- snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
- snowflake/ml/model/model_signature.py +108 -53
- snowflake/ml/model/type_hints.py +1 -0
- snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
- snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
- snowflake/ml/modeling/_internal/model_specifications.py +146 -0
- snowflake/ml/modeling/_internal/model_trainer.py +13 -0
- snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
- snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
- snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
- snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +108 -135
- snowflake/ml/modeling/cluster/affinity_propagation.py +106 -135
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +106 -135
- snowflake/ml/modeling/cluster/birch.py +106 -135
- snowflake/ml/modeling/cluster/bisecting_k_means.py +106 -135
- snowflake/ml/modeling/cluster/dbscan.py +106 -135
- snowflake/ml/modeling/cluster/feature_agglomeration.py +106 -135
- snowflake/ml/modeling/cluster/k_means.py +105 -135
- snowflake/ml/modeling/cluster/mean_shift.py +106 -135
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +105 -135
- snowflake/ml/modeling/cluster/optics.py +106 -135
- snowflake/ml/modeling/cluster/spectral_biclustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_clustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_coclustering.py +106 -135
- snowflake/ml/modeling/compose/column_transformer.py +106 -135
- snowflake/ml/modeling/compose/transformed_target_regressor.py +108 -135
- snowflake/ml/modeling/covariance/elliptic_envelope.py +106 -135
- snowflake/ml/modeling/covariance/empirical_covariance.py +99 -128
- snowflake/ml/modeling/covariance/graphical_lasso.py +106 -135
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +106 -135
- snowflake/ml/modeling/covariance/ledoit_wolf.py +104 -133
- snowflake/ml/modeling/covariance/min_cov_det.py +106 -135
- snowflake/ml/modeling/covariance/oas.py +99 -128
- snowflake/ml/modeling/covariance/shrunk_covariance.py +103 -132
- snowflake/ml/modeling/decomposition/dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/factor_analysis.py +106 -135
- snowflake/ml/modeling/decomposition/fast_ica.py +106 -135
- snowflake/ml/modeling/decomposition/incremental_pca.py +106 -135
- snowflake/ml/modeling/decomposition/kernel_pca.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/pca.py +106 -135
- snowflake/ml/modeling/decomposition/sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/truncated_svd.py +106 -135
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/isolation_forest.py +106 -135
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/stacking_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/voting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/voting_regressor.py +108 -135
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +101 -128
- snowflake/ml/modeling/feature_selection/select_fdr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fpr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fwe.py +99 -126
- snowflake/ml/modeling/feature_selection/select_k_best.py +100 -127
- snowflake/ml/modeling/feature_selection/select_percentile.py +99 -126
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +106 -135
- snowflake/ml/modeling/feature_selection/variance_threshold.py +95 -124
- snowflake/ml/modeling/framework/base.py +83 -1
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +108 -135
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +108 -135
- snowflake/ml/modeling/impute/iterative_imputer.py +106 -135
- snowflake/ml/modeling/impute/knn_imputer.py +106 -135
- snowflake/ml/modeling/impute/missing_indicator.py +106 -135
- snowflake/ml/modeling/impute/simple_imputer.py +9 -1
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +96 -125
- snowflake/ml/modeling/kernel_approximation/nystroem.py +106 -135
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +106 -135
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +105 -134
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +103 -132
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +108 -135
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +90 -118
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +90 -118
- snowflake/ml/modeling/linear_model/ard_regression.py +108 -135
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/gamma_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/huber_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/lars.py +108 -135
- snowflake/ml/modeling/linear_model/lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +108 -135
- snowflake/ml/modeling/linear_model/linear_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +107 -135
- snowflake/ml/modeling/linear_model/perceptron.py +107 -135
- snowflake/ml/modeling/linear_model/poisson_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ransac_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ridge.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_cv.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +106 -135
- snowflake/ml/modeling/linear_model/sgd_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +108 -135
- snowflake/ml/modeling/manifold/isomap.py +106 -135
- snowflake/ml/modeling/manifold/mds.py +106 -135
- snowflake/ml/modeling/manifold/spectral_embedding.py +106 -135
- snowflake/ml/modeling/manifold/tsne.py +106 -135
- snowflake/ml/modeling/metrics/classification.py +196 -55
- snowflake/ml/modeling/metrics/correlation.py +4 -2
- snowflake/ml/modeling/metrics/covariance.py +7 -4
- snowflake/ml/modeling/metrics/ranking.py +32 -16
- snowflake/ml/modeling/metrics/regression.py +60 -32
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +106 -135
- snowflake/ml/modeling/mixture/gaussian_mixture.py +106 -135
- snowflake/ml/modeling/model_selection/grid_search_cv.py +91 -148
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +93 -154
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +105 -132
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +108 -135
- snowflake/ml/modeling/multiclass/output_code_classifier.py +108 -135
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/complement_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +98 -125
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +107 -134
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neighbors/kernel_density.py +106 -135
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +106 -135
- snowflake/ml/modeling/neighbors/nearest_centroid.py +108 -135
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +106 -135
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +106 -135
- snowflake/ml/modeling/neural_network/mlp_classifier.py +108 -135
- snowflake/ml/modeling/neural_network/mlp_regressor.py +108 -135
- snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
- snowflake/ml/modeling/preprocessing/binarizer.py +25 -8
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +9 -4
- snowflake/ml/modeling/preprocessing/label_encoder.py +31 -11
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +27 -9
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +42 -14
- snowflake/ml/modeling/preprocessing/normalizer.py +9 -4
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +26 -10
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +37 -13
- snowflake/ml/modeling/preprocessing/polynomial_features.py +106 -135
- snowflake/ml/modeling/preprocessing/robust_scaler.py +39 -13
- snowflake/ml/modeling/preprocessing/standard_scaler.py +36 -12
- snowflake/ml/modeling/semi_supervised/label_propagation.py +108 -135
- snowflake/ml/modeling/semi_supervised/label_spreading.py +108 -135
- snowflake/ml/modeling/svm/linear_svc.py +108 -135
- snowflake/ml/modeling/svm/linear_svr.py +108 -135
- snowflake/ml/modeling/svm/nu_svc.py +108 -135
- snowflake/ml/modeling/svm/nu_svr.py +108 -135
- snowflake/ml/modeling/svm/svc.py +108 -135
- snowflake/ml/modeling/svm/svr.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_regressor.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_regressor.py +108 -135
- snowflake/ml/modeling/xgboost/xgb_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgb_regressor.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +108 -136
- snowflake/ml/registry/model_registry.py +2 -0
- snowflake/ml/registry/registry.py +215 -0
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +34 -1
- snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
- snowflake_ml_python-1.1.0.dist-info/RECORD +0 -331
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
snowflake/ml/modeling/cluster/optics.py

@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers
 
 from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.cluster".replace("sklearn.", "").split("_")])
 
 
-
 class OPTICS(BaseTransformer):
     r"""Estimate clustering structure from vector array
     For more details on this class, see [sklearn.cluster.OPTICS]
@@ -60,6 +61,49 @@ class OPTICS(BaseTransformer):
 
     Parameters
     ----------
+
+    input_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain features.
+        If this parameter is not specified, all columns in the input DataFrame except
+        the columns specified by label_cols, sample_weight_col, and passthrough_cols
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
+    label_cols: Optional[Union[str, List[str]]]
+        This parameter is optional and will be ignored during fit. It is present here for API consistency by convention.
+
+    output_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that will store the
+        output of predict and transform operations. The length of output_cols must
+        match the expected number of output columns from the specific predictor or
+        transformer class used.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don't specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        be set explicitly for transformers. Output columns can also be set after
+        initialization with the `set_output_cols` method.
+
+    sample_weight_col: Optional[str]
+        A string representing the column name containing the sample weights.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.
+
+    passthrough_cols: Optional[Union[str, List[str]]]
+        A string or a list of strings indicating column names to be excluded from any
+        operations (such as train, transform, or inference). These specified column(s)
+        will remain untouched throughout the process. This option is helpful in scenarios
+        requiring automatic input_cols inference, but need to avoid using specific
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.
+
+    drop_input_cols: Optional[bool], default=False
+        If set, the response of predict(), transform() methods will not contain input columns.
+
     min_samples: int > 1 or float between 0 and 1, default=5
         The number of samples in a neighborhood for a point to be considered as
         a core point. Also, up and down steep regions can't have more than
@@ -165,35 +209,6 @@ class OPTICS(BaseTransformer):
         ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
         ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
         for more details.
-
-    input_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain features.
-        If this parameter is not specified, all columns in the input DataFrame except
-        the columns specified by label_cols and sample_weight_col parameters are
-        considered input columns.
-
-    label_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain labels.
-        This is a required param for estimators, as there is no way to infer these
-        columns. If this parameter is not specified, then object is fitted without
-        labels (like a transformer).
-
-    output_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that will store the
-        output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific estimator or
-        transformer class used.
-        If this parameter is not specified, output column names are derived by
-        adding an OUTPUT_ prefix to the label column names. These inferred output
-        column names work for estimator's predict() method, but output_cols must
-        be set explicitly for transformers.
-
-    sample_weight_col: Optional[str]
-        A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
-
-    drop_input_cols: Optional[bool], default=False
-        If set, the response of predict(), transform() methods will not contain input columns.
     """
 
     def __init__( # type: ignore[no-untyped-def]
@@ -216,6 +231,7 @@ class OPTICS(BaseTransformer):
         input_cols: Optional[Union[str, Iterable[str]]] = None,
         output_cols: Optional[Union[str, Iterable[str]]] = None,
         label_cols: Optional[Union[str, Iterable[str]]] = None,
+        passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
         drop_input_cols: Optional[bool] = False,
         sample_weight_col: Optional[str] = None,
     ) -> None:
@@ -224,9 +240,10 @@ class OPTICS(BaseTransformer):
         self.set_input_cols(input_cols)
         self.set_output_cols(output_cols)
         self.set_label_cols(label_cols)
+        self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])
 
         self._deps = list(deps)
 
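The constructor change above wires the new passthrough_cols parameter through a dedicated setter, alongside the existing column setters. A minimal usage sketch, assuming the conventional snowflake.ml.modeling.cluster import path; the column names are hypothetical:

```python
# Hypothetical column names; passthrough_cols keeps ROW_ID out of training and
# inference while leaving it untouched in the results.
from snowflake.ml.modeling.cluster import OPTICS

clusterer = OPTICS(
    min_samples=5,
    input_cols=["FEATURE_1", "FEATURE_2"],
    output_cols=["CLUSTER_LABEL"],
    passthrough_cols=["ROW_ID"],
)

# Equivalent post-construction setter introduced in this release:
# clusterer.set_passthrough_cols(["ROW_ID"])
```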
@@ -248,13 +265,14 @@ class OPTICS(BaseTransformer):
             args=init_args,
             klass=sklearn.cluster.OPTICS
         )
-        self._sklearn_object = sklearn.cluster.OPTICS(
+        self._sklearn_object: Any = sklearn.cluster.OPTICS(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=OPTICS.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=OPTICS.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True
 
     def _get_rand_id(self) -> str:
         """
@@ -265,24 +283,6 @@ class OPTICS(BaseTransformer):
         """
         return str(uuid4()).replace("-", "_").upper()
 
-    def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
-        """
-        Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
-
-        Args:
-            dataset: Input dataset.
-        """
-        if not self.input_cols:
-            cols = [
-                c for c in dataset.columns
-                if c not in self.get_label_cols() and c != self.sample_weight_col
-            ]
-            self.set_input_cols(input_cols=cols)
-
-        if not self.output_cols:
-            cols = [identifier.concat_names(ids=['OUTPUT_', c]) for c in self.label_cols]
-            self.set_output_cols(output_cols=cols)
-
     def set_input_cols(self, input_cols: Optional[Union[str, Iterable[str]]]) -> "OPTICS":
         """
         Input columns setter.
@@ -328,54 +328,48 @@ class OPTICS(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-
-
-
-
-
-            self.
-
-
-
-
-
-
-
-
-
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None  # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+            # If we are already in a stored procedure, no need to kick off another one.
+            if SNOWML_SPROC_ENV in os.environ:
+                statement_params = telemetry.get_function_usage_statement_params(
+                    project=_PROJECT,
+                    subproject=_SUBPROJECT,
+                    function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), OPTICS.__class__.__name__),
+                    api_calls=[Session.call],
+                    custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+                )
+                pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+                pd_df.columns = dataset.columns
+                dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self
 
-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
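With the rewrite above, fit() routes both pandas and Snowpark inputs through ModelTrainerBuilder instead of the old _fit_snowpark / handler path. A hedged end-to-end sketch with made-up data; the import path and the exact return type of fit_predict (a NumPy array of labels_, per the fit_predict hunk below) are assumptions based on this diff:

```python
# Local pandas path: training happens in-process; a Snowpark DataFrame would
# instead go through dependency validation and the builder's Snowpark trainer.
import pandas as pd
from snowflake.ml.modeling.cluster import OPTICS

df = pd.DataFrame(
    {"X1": [0.0, 0.1, 0.2, 5.0, 5.1, 5.2],
     "X2": [0.0, 0.1, 0.2, 5.0, 5.1, 5.2]}
)

clusterer = OPTICS(min_samples=2, input_cols=["X1", "X2"], output_cols=["CLUSTER"])
labels = clusterer.fit_predict(df)  # cluster labels taken from the fitted estimator
```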
@@ -563,11 +557,6 @@ class OPTICS(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.
 
@@ -619,11 +608,6 @@ class OPTICS(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.
 
@@ -682,7 +666,8 @@ class OPTICS(BaseTransformer):
         if True:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels : npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError
 
@@ -718,6 +703,7 @@ class OPTICS(BaseTransformer):
            output_cols = []
 
         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None  # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]
 
         return rv
@@ -728,11 +714,6 @@ class OPTICS(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -773,11 +754,6 @@ class OPTICS(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -814,16 +790,6 @@ class OPTICS(BaseTransformer):
         return output_df
 
     @available_if(original_estimator_has_callable("decision_function")) # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -922,11 +888,6 @@ class OPTICS(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
@@ -986,18 +947,28 @@ class OPTICS(BaseTransformer):
             # For classifier, the type of predict is the same as the type of label
             if self._sklearn_object._estimator_type == 'classifier':
                 # label columns is the desired type for output
-                outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+                outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
                 # rename the output columns
-                outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+                outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
+                self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                       ([] if self._drop_input_cols else inputs)
+                                                                       + outputs)
+            # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
+            # For outlier models, returns -1 for outliers and 1 for inliers.
+            # Clusterer returns int64 cluster labels.
+            elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
+                outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
                 self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                        ([] if self._drop_input_cols else inputs)
                                                                        + outputs)
+
             # For regressor, the type of predict is float64
             elif self._sklearn_object._estimator_type == 'regressor':
                 outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
                 self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                        ([] if self._drop_input_cols else inputs)
                                                                        + outputs)
+
             for prob_func in PROB_FUNCTIONS:
                 if hasattr(self, prob_func):
                     output_cols_prefix: str = f"{prob_func}_"