snowflake-ml-python 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- snowflake/cortex/_complete.py +1 -1
- snowflake/cortex/_extract_answer.py +1 -1
- snowflake/cortex/_sentiment.py +1 -1
- snowflake/cortex/_summarize.py +1 -1
- snowflake/cortex/_translate.py +1 -1
- snowflake/ml/_internal/env_utils.py +68 -6
- snowflake/ml/_internal/file_utils.py +34 -4
- snowflake/ml/_internal/telemetry.py +79 -91
- snowflake/ml/_internal/utils/identifier.py +78 -72
- snowflake/ml/_internal/utils/retryable_http.py +16 -4
- snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
- snowflake/ml/dataset/dataset.py +1 -1
- snowflake/ml/model/_api.py +21 -14
- snowflake/ml/model/_client/model/model_impl.py +176 -0
- snowflake/ml/model/_client/model/model_method_info.py +19 -0
- snowflake/ml/model/_client/model/model_version_impl.py +291 -0
- snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
- snowflake/ml/model/_client/ops/model_ops.py +308 -0
- snowflake/ml/model/_client/sql/model.py +75 -0
- snowflake/ml/model/_client/sql/model_version.py +213 -0
- snowflake/ml/model/_client/sql/stage.py +40 -0
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
- snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
- snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
- snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
- snowflake/ml/model/_model_composer/model_composer.py +31 -9
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
- snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
- snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
- snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
- snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
- snowflake/ml/model/model_signature.py +108 -53
- snowflake/ml/model/type_hints.py +1 -0
- snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
- snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
- snowflake/ml/modeling/_internal/model_specifications.py +146 -0
- snowflake/ml/modeling/_internal/model_trainer.py +13 -0
- snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
- snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
- snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
- snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +108 -135
- snowflake/ml/modeling/cluster/affinity_propagation.py +106 -135
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +106 -135
- snowflake/ml/modeling/cluster/birch.py +106 -135
- snowflake/ml/modeling/cluster/bisecting_k_means.py +106 -135
- snowflake/ml/modeling/cluster/dbscan.py +106 -135
- snowflake/ml/modeling/cluster/feature_agglomeration.py +106 -135
- snowflake/ml/modeling/cluster/k_means.py +105 -135
- snowflake/ml/modeling/cluster/mean_shift.py +106 -135
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +105 -135
- snowflake/ml/modeling/cluster/optics.py +106 -135
- snowflake/ml/modeling/cluster/spectral_biclustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_clustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_coclustering.py +106 -135
- snowflake/ml/modeling/compose/column_transformer.py +106 -135
- snowflake/ml/modeling/compose/transformed_target_regressor.py +108 -135
- snowflake/ml/modeling/covariance/elliptic_envelope.py +106 -135
- snowflake/ml/modeling/covariance/empirical_covariance.py +99 -128
- snowflake/ml/modeling/covariance/graphical_lasso.py +106 -135
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +106 -135
- snowflake/ml/modeling/covariance/ledoit_wolf.py +104 -133
- snowflake/ml/modeling/covariance/min_cov_det.py +106 -135
- snowflake/ml/modeling/covariance/oas.py +99 -128
- snowflake/ml/modeling/covariance/shrunk_covariance.py +103 -132
- snowflake/ml/modeling/decomposition/dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/factor_analysis.py +106 -135
- snowflake/ml/modeling/decomposition/fast_ica.py +106 -135
- snowflake/ml/modeling/decomposition/incremental_pca.py +106 -135
- snowflake/ml/modeling/decomposition/kernel_pca.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/pca.py +106 -135
- snowflake/ml/modeling/decomposition/sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/truncated_svd.py +106 -135
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/isolation_forest.py +106 -135
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/stacking_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/voting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/voting_regressor.py +108 -135
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +101 -128
- snowflake/ml/modeling/feature_selection/select_fdr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fpr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fwe.py +99 -126
- snowflake/ml/modeling/feature_selection/select_k_best.py +100 -127
- snowflake/ml/modeling/feature_selection/select_percentile.py +99 -126
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +106 -135
- snowflake/ml/modeling/feature_selection/variance_threshold.py +95 -124
- snowflake/ml/modeling/framework/base.py +83 -1
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +108 -135
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +108 -135
- snowflake/ml/modeling/impute/iterative_imputer.py +106 -135
- snowflake/ml/modeling/impute/knn_imputer.py +106 -135
- snowflake/ml/modeling/impute/missing_indicator.py +106 -135
- snowflake/ml/modeling/impute/simple_imputer.py +9 -1
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +96 -125
- snowflake/ml/modeling/kernel_approximation/nystroem.py +106 -135
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +106 -135
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +105 -134
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +103 -132
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +108 -135
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +90 -118
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +90 -118
- snowflake/ml/modeling/linear_model/ard_regression.py +108 -135
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/gamma_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/huber_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/lars.py +108 -135
- snowflake/ml/modeling/linear_model/lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +108 -135
- snowflake/ml/modeling/linear_model/linear_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +107 -135
- snowflake/ml/modeling/linear_model/perceptron.py +107 -135
- snowflake/ml/modeling/linear_model/poisson_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ransac_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ridge.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_cv.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +106 -135
- snowflake/ml/modeling/linear_model/sgd_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +108 -135
- snowflake/ml/modeling/manifold/isomap.py +106 -135
- snowflake/ml/modeling/manifold/mds.py +106 -135
- snowflake/ml/modeling/manifold/spectral_embedding.py +106 -135
- snowflake/ml/modeling/manifold/tsne.py +106 -135
- snowflake/ml/modeling/metrics/classification.py +196 -55
- snowflake/ml/modeling/metrics/correlation.py +4 -2
- snowflake/ml/modeling/metrics/covariance.py +7 -4
- snowflake/ml/modeling/metrics/ranking.py +32 -16
- snowflake/ml/modeling/metrics/regression.py +60 -32
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +106 -135
- snowflake/ml/modeling/mixture/gaussian_mixture.py +106 -135
- snowflake/ml/modeling/model_selection/grid_search_cv.py +91 -148
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +93 -154
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +105 -132
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +108 -135
- snowflake/ml/modeling/multiclass/output_code_classifier.py +108 -135
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/complement_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +98 -125
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +107 -134
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neighbors/kernel_density.py +106 -135
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +106 -135
- snowflake/ml/modeling/neighbors/nearest_centroid.py +108 -135
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +106 -135
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +106 -135
- snowflake/ml/modeling/neural_network/mlp_classifier.py +108 -135
- snowflake/ml/modeling/neural_network/mlp_regressor.py +108 -135
- snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
- snowflake/ml/modeling/preprocessing/binarizer.py +25 -8
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +9 -4
- snowflake/ml/modeling/preprocessing/label_encoder.py +31 -11
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +27 -9
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +42 -14
- snowflake/ml/modeling/preprocessing/normalizer.py +9 -4
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +26 -10
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +37 -13
- snowflake/ml/modeling/preprocessing/polynomial_features.py +106 -135
- snowflake/ml/modeling/preprocessing/robust_scaler.py +39 -13
- snowflake/ml/modeling/preprocessing/standard_scaler.py +36 -12
- snowflake/ml/modeling/semi_supervised/label_propagation.py +108 -135
- snowflake/ml/modeling/semi_supervised/label_spreading.py +108 -135
- snowflake/ml/modeling/svm/linear_svc.py +108 -135
- snowflake/ml/modeling/svm/linear_svr.py +108 -135
- snowflake/ml/modeling/svm/nu_svc.py +108 -135
- snowflake/ml/modeling/svm/nu_svr.py +108 -135
- snowflake/ml/modeling/svm/svc.py +108 -135
- snowflake/ml/modeling/svm/svr.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_regressor.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_regressor.py +108 -135
- snowflake/ml/modeling/xgboost/xgb_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgb_regressor.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +108 -136
- snowflake/ml/registry/model_registry.py +2 -0
- snowflake/ml/registry/registry.py +215 -0
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +34 -1
- snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
- snowflake_ml_python-1.1.0.dist-info/RECORD +0 -331
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
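The headline additions in this release are the new `snowflake/ml/registry/registry.py` entry point and the `snowflake/ml/model/_client/` modules (model, model version, ops, and SQL layers). A minimal usage sketch of that registry client follows; the `Registry`, `log_model`, and `run` names are inferred from the new file names and from public documentation for this release line, so treat the exact signatures as assumptions rather than the shipped API.

```python
# Hedged sketch of the new model registry client (snowflake/ml/registry/registry.py).
# Names and signatures are assumptions based on the files added in this diff.
from snowflake.snowpark import Session
from snowflake.ml.registry import Registry

# Placeholder connection parameters; fill in real credentials.
connection_parameters = {"account": "<account>", "user": "<user>", "password": "<password>"}
session = Session.builder.configs(connection_parameters).create()

reg = Registry(session=session, database_name="ML_DB", schema_name="MODELS")

# `fitted_model` is a previously trained model object and `test_df` a Snowpark DataFrame
# (both placeholders). Logging returns a version handle (see model_version_impl.py above).
mv = reg.log_model(fitted_model, model_name="MY_MODEL", version_name="V1")

# Invoke one of the logged methods against a Snowpark DataFrame.
predictions = mv.run(test_df, function_name="predict")
```

The hunks below come from `snowflake/ml/modeling/cluster/agglomerative_clustering.py`; the near-identical +106/-135 line counts across the estimator files above suggest the same set of changes is applied to every autogenerated wrapper: new column-handling docstrings and the `passthrough_cols` option, a rewritten `fit()` that delegates to `ModelTrainerBuilder`, and removal of the `add_stmt_params_to_df` telemetry decorator.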
@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers

 from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.cluster".replace("sklearn.", "").split("_")])


-
 class AgglomerativeClustering(BaseTransformer):
     r"""Agglomerative Clustering
     For more details on this class, see [sklearn.cluster.AgglomerativeClustering]
@@ -60,6 +61,49 @@ class AgglomerativeClustering(BaseTransformer):

     Parameters
     ----------
+
+    input_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain features.
+        If this parameter is not specified, all columns in the input DataFrame except
+        the columns specified by label_cols, sample_weight_col, and passthrough_cols
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
+    label_cols: Optional[Union[str, List[str]]]
+        This parameter is optional and will be ignored during fit. It is present here for API consistency by convention.
+
+    output_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that will store the
+        output of predict and transform operations. The length of output_cols must
+        match the expected number of output columns from the specific predictor or
+        transformer class used.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX> for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don’t specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        Output columns can also be set after
+        initialization with the `set_output_cols` method.
+
+    sample_weight_col: Optional[str]
+        A string representing the column name containing the sample weights.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.
+
+    passthrough_cols: Optional[Union[str, List[str]]]
+        A string or a list of strings indicating column names to be excluded from any
+        operations (such as train, transform, or inference). These specified column(s)
+        will remain untouched throughout the process. This option is helpful in scenarios
+        requiring automatic input_cols inference, but need to avoid using specific
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.
+
+    drop_input_cols: Optional[bool], default=False
+        If set, the response of predict(), transform() methods will not contain input columns.
+
     n_clusters: int or None, default=2
         The number of clusters to find. It must be ``None`` if
         ``distance_threshold`` is not ``None``.
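The parameter descriptions added above standardize column handling across all of the autogenerated wrappers. A rough usage sketch of how these options combine (data and column names are invented; `KMeans` from the same `modeling.cluster` package is used because, unlike `AgglomerativeClustering`, it supports `predict()`):

```python
# Illustrative only: the DataFrame and column names are made up for this sketch.
import pandas as pd
from snowflake.ml.modeling.cluster import KMeans

df = pd.DataFrame({
    "ID": [1, 2, 3, 4],            # excluded from training via passthrough_cols
    "X1": [0.1, 0.4, 0.8, 0.9],
    "X2": [1.0, 0.9, 0.2, 0.1],
})

km = KMeans(
    n_clusters=2,
    input_cols=["X1", "X2"],       # features; inferred from the DataFrame if omitted
    output_cols=["CLUSTER"],       # otherwise an OUTPUT_<IDX> name is generated
    passthrough_cols=["ID"],       # new in this release: left untouched by fit/predict
)
km.fit(df)
result = km.predict(df)            # returns the input columns plus CLUSTER
```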
@@ -127,35 +171,6 @@ class AgglomerativeClustering(BaseTransformer):
         Computes distances between clusters even if `distance_threshold` is not
         used. This can be used to make dendrogram visualization, but introduces
         a computational and memory overhead.
-
-    input_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain features.
-        If this parameter is not specified, all columns in the input DataFrame except
-        the columns specified by label_cols and sample_weight_col parameters are
-        considered input columns.
-
-    label_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain labels.
-        This is a required param for estimators, as there is no way to infer these
-        columns. If this parameter is not specified, then object is fitted without
-        labels (like a transformer).
-
-    output_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that will store the
-        output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific estimator or
-        transformer class used.
-        If this parameter is not specified, output column names are derived by
-        adding an OUTPUT_ prefix to the label column names. These inferred output
-        column names work for estimator's predict() method, but output_cols must
-        be set explicitly for transformers.
-
-    sample_weight_col: Optional[str]
-        A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
-
-    drop_input_cols: Optional[bool], default=False
-        If set, the response of predict(), transform() methods will not contain input columns.
     """

     def __init__(  # type: ignore[no-untyped-def]
@@ -173,6 +188,7 @@ class AgglomerativeClustering(BaseTransformer):
         input_cols: Optional[Union[str, Iterable[str]]] = None,
         output_cols: Optional[Union[str, Iterable[str]]] = None,
         label_cols: Optional[Union[str, Iterable[str]]] = None,
+        passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
         drop_input_cols: Optional[bool] = False,
         sample_weight_col: Optional[str] = None,
     ) -> None:
@@ -181,9 +197,10 @@ class AgglomerativeClustering(BaseTransformer):
         self.set_input_cols(input_cols)
         self.set_output_cols(output_cols)
         self.set_label_cols(label_cols)
+        self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])

         self._deps = list(deps)

@@ -200,13 +217,14 @@ class AgglomerativeClustering(BaseTransformer):
             args=init_args,
             klass=sklearn.cluster.AgglomerativeClustering
         )
-        self._sklearn_object = sklearn.cluster.AgglomerativeClustering(
+        self._sklearn_object: Any = sklearn.cluster.AgglomerativeClustering(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=AgglomerativeClustering.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=AgglomerativeClustering.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True

     def _get_rand_id(self) -> str:
         """
@@ -217,24 +235,6 @@ class AgglomerativeClustering(BaseTransformer):
         """
         return str(uuid4()).replace("-", "_").upper()

-    def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
-        """
-        Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
-
-        Args:
-            dataset: Input dataset.
-        """
-        if not self.input_cols:
-            cols = [
-                c for c in dataset.columns
-                if c not in self.get_label_cols() and c != self.sample_weight_col
-            ]
-            self.set_input_cols(input_cols=cols)
-
-        if not self.output_cols:
-            cols = [identifier.concat_names(ids=['OUTPUT_', c]) for c in self.label_cols]
-            self.set_output_cols(output_cols=cols)
-
     def set_input_cols(self, input_cols: Optional[Union[str, Iterable[str]]]) -> "AgglomerativeClustering":
         """
         Input columns setter.
@@ -280,54 +280,48 @@ class AgglomerativeClustering(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-
-
-
-
-
-        self.
-
-
-
-
-
-
-
-
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None  # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+            # If we are already in a stored procedure, no need to kick off another one.
+            if SNOWML_SPROC_ENV in os.environ:
+                statement_params = telemetry.get_function_usage_statement_params(
+                    project=_PROJECT,
+                    subproject=_SUBPROJECT,
+                    function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), AgglomerativeClustering.__class__.__name__),
+                    api_calls=[Session.call],
+                    custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+                )
+                pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+                pd_df.columns = dataset.columns
+                dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self

-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None  # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None  # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
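The rewritten `fit()` above no longer implements the Snowpark training path inline (the removed `_fit_snowpark` method); it hands training off to the new `ModelTrainerBuilder` (see `model_trainer.py`, `model_trainer_builder.py`, `pandas_trainer.py`, and `snowpark_trainer.py` in the file list). A minimal sketch of the dispatch this implies is below; only `ModelTrainerBuilder.build(...)` and `train()` appear in the diff, so the trainer class names and bodies here are assumptions, not the shipped implementation.

```python
# Hedged sketch of the trainer dispatch suggested by the new modules; everything except
# ModelTrainerBuilder.build(...) and train() (which appear in the diff) is an assumption.
from dataclasses import dataclass
from typing import Any, List, Optional, Protocol

import pandas as pd


class ModelTrainer(Protocol):
    """Matches the train() call made at the end of the new fit() above."""
    def train(self) -> Any: ...


@dataclass
class PandasModelTrainer:
    """Fits the wrapped scikit-learn estimator in-process on local pandas data."""
    estimator: Any
    dataset: pd.DataFrame
    input_cols: List[str]
    label_cols: Optional[List[str]]
    sample_weight_col: Optional[str]

    def train(self) -> Any:
        kwargs = {}
        if self.sample_weight_col is not None:
            kwargs["sample_weight"] = self.dataset[self.sample_weight_col].squeeze()
        X = self.dataset[self.input_cols]
        if self.label_cols:
            y = self.dataset[self.label_cols].squeeze()
            return self.estimator.fit(X, y, **kwargs)
        return self.estimator.fit(X, **kwargs)


class ModelTrainerBuilder:
    @classmethod
    def build(cls, estimator: Any, dataset: Any, input_cols: List[str],
              label_cols: Optional[List[str]] = None, sample_weight_col: Optional[str] = None,
              autogenerated: bool = False, subproject: str = "") -> ModelTrainer:
        # Local pandas data is trained in-process; a Snowpark DataFrame would instead be
        # routed to a stored-procedure trainer (snowpark_trainer.py), omitted from this sketch.
        if isinstance(dataset, pd.DataFrame):
            return PandasModelTrainer(estimator, dataset, input_cols, label_cols, sample_weight_col)
        raise NotImplementedError("Snowpark training path omitted from this sketch")
```

Centralizing training this way means every autogenerated estimator shares one code path instead of carrying its own `_fit_snowpark` body, which appears to account for the large negative line counts across the estimator files listed above.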
@@ -515,11 +509,6 @@ class AgglomerativeClustering(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.

@@ -571,11 +560,6 @@ class AgglomerativeClustering(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.

@@ -634,7 +618,8 @@ class AgglomerativeClustering(BaseTransformer):
         if True:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels : npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError

@@ -670,6 +655,7 @@ class AgglomerativeClustering(BaseTransformer):
             output_cols = []

         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None  # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]

         return rv
@@ -680,11 +666,6 @@ class AgglomerativeClustering(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -725,11 +706,6 @@ class AgglomerativeClustering(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -766,16 +742,6 @@ class AgglomerativeClustering(BaseTransformer):
         return output_df

     @available_if(original_estimator_has_callable("decision_function"))  # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -874,11 +840,6 @@ class AgglomerativeClustering(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
@@ -938,18 +899,28 @@ class AgglomerativeClustering(BaseTransformer):
         # For classifier, the type of predict is the same as the type of label
         if self._sklearn_object._estimator_type == 'classifier':
             # label columns is the desired type for output
-            outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+            outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
             # rename the output columns
-            outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+            outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
+            self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                    ([] if self._drop_input_cols else inputs)
+                                                                    + outputs)
+        # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
+        # For outlier models, returns -1 for outliers and 1 for inliers.
+        # Clusterer returns int64 cluster labels.
+        elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
+            outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                     ([] if self._drop_input_cols else inputs)
                                                                     + outputs)
+
         # For regressor, the type of predict is float64
         elif self._sklearn_object._estimator_type == 'regressor':
             outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                     ([] if self._drop_input_cols else inputs)
                                                                     + outputs)
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
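The signature logic above is the other notable change in this hunk: classifier outputs keep the label column types, the new branch gives density estimators, clusterers, and outlier detectors INT64 outputs, and regressors keep DOUBLE. As a hedged illustration, this is roughly the predict signature a clusterer like this one would end up with (column names are invented; `FeatureSpec`, `DataType`, and `ModelSignature` are the public classes imported at the top of the module):

```python
# Illustrative reconstruction of the predict signature produced by the clusterer branch above.
# Column names are invented; drop_input_cols is assumed to be False.
from snowflake.ml.model.model_signature import DataType, FeatureSpec, ModelSignature

inputs = [
    FeatureSpec(dtype=DataType.DOUBLE, name="X1"),
    FeatureSpec(dtype=DataType.DOUBLE, name="X2"),
]
# Clusterer / outlier detector / density estimator branch: one INT64 output per output column.
outputs = [FeatureSpec(dtype=DataType.INT64, name="CLUSTER")]

# With drop_input_cols=False the inputs are echoed into the output side of the signature.
predict_signature = ModelSignature(inputs=inputs, outputs=inputs + outputs)
print(predict_signature.to_dict())
```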