snowflake-ml-python 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- snowflake/cortex/_complete.py +1 -1
- snowflake/cortex/_extract_answer.py +1 -1
- snowflake/cortex/_sentiment.py +1 -1
- snowflake/cortex/_summarize.py +1 -1
- snowflake/cortex/_translate.py +1 -1
- snowflake/ml/_internal/env_utils.py +68 -6
- snowflake/ml/_internal/file_utils.py +34 -4
- snowflake/ml/_internal/telemetry.py +79 -91
- snowflake/ml/_internal/utils/identifier.py +78 -72
- snowflake/ml/_internal/utils/retryable_http.py +16 -4
- snowflake/ml/_internal/utils/spcs_attribution_utils.py +122 -0
- snowflake/ml/dataset/dataset.py +1 -1
- snowflake/ml/model/_api.py +21 -14
- snowflake/ml/model/_client/model/model_impl.py +176 -0
- snowflake/ml/model/_client/model/model_method_info.py +19 -0
- snowflake/ml/model/_client/model/model_version_impl.py +291 -0
- snowflake/ml/model/_client/ops/metadata_ops.py +107 -0
- snowflake/ml/model/_client/ops/model_ops.py +308 -0
- snowflake/ml/model/_client/sql/model.py +75 -0
- snowflake/ml/model/_client/sql/model_version.py +213 -0
- snowflake/ml/model/_client/sql/stage.py +40 -0
- snowflake/ml/model/_deploy_client/image_builds/server_image_builder.py +3 -4
- snowflake/ml/model/_deploy_client/image_builds/templates/image_build_job_spec_template +24 -8
- snowflake/ml/model/_deploy_client/image_builds/templates/kaniko_shell_script_template +23 -0
- snowflake/ml/model/_deploy_client/snowservice/deploy.py +14 -2
- snowflake/ml/model/_deploy_client/utils/constants.py +1 -0
- snowflake/ml/model/_deploy_client/warehouse/deploy.py +2 -2
- snowflake/ml/model/_model_composer/model_composer.py +31 -9
- snowflake/ml/model/_model_composer/model_manifest/model_manifest.py +25 -10
- snowflake/ml/model/_model_composer/model_manifest/model_manifest_schema.py +2 -2
- snowflake/ml/model/_model_composer/model_method/infer_function.py_template +2 -1
- snowflake/ml/model/_model_composer/model_method/model_method.py +34 -3
- snowflake/ml/model/_model_composer/model_runtime/model_runtime.py +1 -1
- snowflake/ml/model/_packager/model_handlers/huggingface_pipeline.py +3 -1
- snowflake/ml/model/_packager/model_handlers/snowmlmodel.py +10 -28
- snowflake/ml/model/_packager/model_meta/model_meta.py +18 -16
- snowflake/ml/model/_signatures/snowpark_handler.py +1 -1
- snowflake/ml/model/model_signature.py +108 -53
- snowflake/ml/model/type_hints.py +1 -0
- snowflake/ml/modeling/_internal/distributed_hpo_trainer.py +554 -0
- snowflake/ml/modeling/_internal/estimator_protocols.py +1 -60
- snowflake/ml/modeling/_internal/model_specifications.py +146 -0
- snowflake/ml/modeling/_internal/model_trainer.py +13 -0
- snowflake/ml/modeling/_internal/model_trainer_builder.py +78 -0
- snowflake/ml/modeling/_internal/pandas_trainer.py +54 -0
- snowflake/ml/modeling/_internal/snowpark_handlers.py +6 -760
- snowflake/ml/modeling/_internal/snowpark_trainer.py +331 -0
- snowflake/ml/modeling/calibration/calibrated_classifier_cv.py +108 -135
- snowflake/ml/modeling/cluster/affinity_propagation.py +106 -135
- snowflake/ml/modeling/cluster/agglomerative_clustering.py +106 -135
- snowflake/ml/modeling/cluster/birch.py +106 -135
- snowflake/ml/modeling/cluster/bisecting_k_means.py +106 -135
- snowflake/ml/modeling/cluster/dbscan.py +106 -135
- snowflake/ml/modeling/cluster/feature_agglomeration.py +106 -135
- snowflake/ml/modeling/cluster/k_means.py +105 -135
- snowflake/ml/modeling/cluster/mean_shift.py +106 -135
- snowflake/ml/modeling/cluster/mini_batch_k_means.py +105 -135
- snowflake/ml/modeling/cluster/optics.py +106 -135
- snowflake/ml/modeling/cluster/spectral_biclustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_clustering.py +106 -135
- snowflake/ml/modeling/cluster/spectral_coclustering.py +106 -135
- snowflake/ml/modeling/compose/column_transformer.py +106 -135
- snowflake/ml/modeling/compose/transformed_target_regressor.py +108 -135
- snowflake/ml/modeling/covariance/elliptic_envelope.py +106 -135
- snowflake/ml/modeling/covariance/empirical_covariance.py +99 -128
- snowflake/ml/modeling/covariance/graphical_lasso.py +106 -135
- snowflake/ml/modeling/covariance/graphical_lasso_cv.py +106 -135
- snowflake/ml/modeling/covariance/ledoit_wolf.py +104 -133
- snowflake/ml/modeling/covariance/min_cov_det.py +106 -135
- snowflake/ml/modeling/covariance/oas.py +99 -128
- snowflake/ml/modeling/covariance/shrunk_covariance.py +103 -132
- snowflake/ml/modeling/decomposition/dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/factor_analysis.py +106 -135
- snowflake/ml/modeling/decomposition/fast_ica.py +106 -135
- snowflake/ml/modeling/decomposition/incremental_pca.py +106 -135
- snowflake/ml/modeling/decomposition/kernel_pca.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_dictionary_learning.py +106 -135
- snowflake/ml/modeling/decomposition/mini_batch_sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/pca.py +106 -135
- snowflake/ml/modeling/decomposition/sparse_pca.py +106 -135
- snowflake/ml/modeling/decomposition/truncated_svd.py +106 -135
- snowflake/ml/modeling/discriminant_analysis/linear_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/discriminant_analysis/quadratic_discriminant_analysis.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/ada_boost_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/bagging_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/extra_trees_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/hist_gradient_boosting_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/isolation_forest.py +106 -135
- snowflake/ml/modeling/ensemble/random_forest_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/random_forest_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/stacking_regressor.py +108 -135
- snowflake/ml/modeling/ensemble/voting_classifier.py +108 -135
- snowflake/ml/modeling/ensemble/voting_regressor.py +108 -135
- snowflake/ml/modeling/feature_selection/generic_univariate_select.py +101 -128
- snowflake/ml/modeling/feature_selection/select_fdr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fpr.py +99 -126
- snowflake/ml/modeling/feature_selection/select_fwe.py +99 -126
- snowflake/ml/modeling/feature_selection/select_k_best.py +100 -127
- snowflake/ml/modeling/feature_selection/select_percentile.py +99 -126
- snowflake/ml/modeling/feature_selection/sequential_feature_selector.py +106 -135
- snowflake/ml/modeling/feature_selection/variance_threshold.py +95 -124
- snowflake/ml/modeling/framework/base.py +83 -1
- snowflake/ml/modeling/gaussian_process/gaussian_process_classifier.py +108 -135
- snowflake/ml/modeling/gaussian_process/gaussian_process_regressor.py +108 -135
- snowflake/ml/modeling/impute/iterative_imputer.py +106 -135
- snowflake/ml/modeling/impute/knn_imputer.py +106 -135
- snowflake/ml/modeling/impute/missing_indicator.py +106 -135
- snowflake/ml/modeling/impute/simple_imputer.py +9 -1
- snowflake/ml/modeling/kernel_approximation/additive_chi2_sampler.py +96 -125
- snowflake/ml/modeling/kernel_approximation/nystroem.py +106 -135
- snowflake/ml/modeling/kernel_approximation/polynomial_count_sketch.py +106 -135
- snowflake/ml/modeling/kernel_approximation/rbf_sampler.py +105 -134
- snowflake/ml/modeling/kernel_approximation/skewed_chi2_sampler.py +103 -132
- snowflake/ml/modeling/kernel_ridge/kernel_ridge.py +108 -135
- snowflake/ml/modeling/lightgbm/lgbm_classifier.py +90 -118
- snowflake/ml/modeling/lightgbm/lgbm_regressor.py +90 -118
- snowflake/ml/modeling/linear_model/ard_regression.py +108 -135
- snowflake/ml/modeling/linear_model/bayesian_ridge.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/gamma_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/huber_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/lars.py +108 -135
- snowflake/ml/modeling/linear_model/lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_cv.py +108 -135
- snowflake/ml/modeling/linear_model/lasso_lars_ic.py +108 -135
- snowflake/ml/modeling/linear_model/linear_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression.py +108 -135
- snowflake/ml/modeling/linear_model/logistic_regression_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_elastic_net_cv.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso.py +108 -135
- snowflake/ml/modeling/linear_model/multi_task_lasso_cv.py +108 -135
- snowflake/ml/modeling/linear_model/orthogonal_matching_pursuit.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/passive_aggressive_regressor.py +107 -135
- snowflake/ml/modeling/linear_model/perceptron.py +107 -135
- snowflake/ml/modeling/linear_model/poisson_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ransac_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/ridge.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_classifier_cv.py +108 -135
- snowflake/ml/modeling/linear_model/ridge_cv.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_classifier.py +108 -135
- snowflake/ml/modeling/linear_model/sgd_one_class_svm.py +106 -135
- snowflake/ml/modeling/linear_model/sgd_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/theil_sen_regressor.py +108 -135
- snowflake/ml/modeling/linear_model/tweedie_regressor.py +108 -135
- snowflake/ml/modeling/manifold/isomap.py +106 -135
- snowflake/ml/modeling/manifold/mds.py +106 -135
- snowflake/ml/modeling/manifold/spectral_embedding.py +106 -135
- snowflake/ml/modeling/manifold/tsne.py +106 -135
- snowflake/ml/modeling/metrics/classification.py +196 -55
- snowflake/ml/modeling/metrics/correlation.py +4 -2
- snowflake/ml/modeling/metrics/covariance.py +7 -4
- snowflake/ml/modeling/metrics/ranking.py +32 -16
- snowflake/ml/modeling/metrics/regression.py +60 -32
- snowflake/ml/modeling/mixture/bayesian_gaussian_mixture.py +106 -135
- snowflake/ml/modeling/mixture/gaussian_mixture.py +106 -135
- snowflake/ml/modeling/model_selection/grid_search_cv.py +91 -148
- snowflake/ml/modeling/model_selection/randomized_search_cv.py +93 -154
- snowflake/ml/modeling/multiclass/one_vs_one_classifier.py +105 -132
- snowflake/ml/modeling/multiclass/one_vs_rest_classifier.py +108 -135
- snowflake/ml/modeling/multiclass/output_code_classifier.py +108 -135
- snowflake/ml/modeling/naive_bayes/bernoulli_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/categorical_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/complement_nb.py +108 -135
- snowflake/ml/modeling/naive_bayes/gaussian_nb.py +98 -125
- snowflake/ml/modeling/naive_bayes/multinomial_nb.py +107 -134
- snowflake/ml/modeling/neighbors/k_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/k_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neighbors/kernel_density.py +106 -135
- snowflake/ml/modeling/neighbors/local_outlier_factor.py +106 -135
- snowflake/ml/modeling/neighbors/nearest_centroid.py +108 -135
- snowflake/ml/modeling/neighbors/nearest_neighbors.py +106 -135
- snowflake/ml/modeling/neighbors/neighborhood_components_analysis.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_classifier.py +108 -135
- snowflake/ml/modeling/neighbors/radius_neighbors_regressor.py +108 -135
- snowflake/ml/modeling/neural_network/bernoulli_rbm.py +106 -135
- snowflake/ml/modeling/neural_network/mlp_classifier.py +108 -135
- snowflake/ml/modeling/neural_network/mlp_regressor.py +108 -135
- snowflake/ml/modeling/parameters/disable_distributed_hpo.py +2 -6
- snowflake/ml/modeling/preprocessing/binarizer.py +25 -8
- snowflake/ml/modeling/preprocessing/k_bins_discretizer.py +9 -4
- snowflake/ml/modeling/preprocessing/label_encoder.py +31 -11
- snowflake/ml/modeling/preprocessing/max_abs_scaler.py +27 -9
- snowflake/ml/modeling/preprocessing/min_max_scaler.py +42 -14
- snowflake/ml/modeling/preprocessing/normalizer.py +9 -4
- snowflake/ml/modeling/preprocessing/one_hot_encoder.py +26 -10
- snowflake/ml/modeling/preprocessing/ordinal_encoder.py +37 -13
- snowflake/ml/modeling/preprocessing/polynomial_features.py +106 -135
- snowflake/ml/modeling/preprocessing/robust_scaler.py +39 -13
- snowflake/ml/modeling/preprocessing/standard_scaler.py +36 -12
- snowflake/ml/modeling/semi_supervised/label_propagation.py +108 -135
- snowflake/ml/modeling/semi_supervised/label_spreading.py +108 -135
- snowflake/ml/modeling/svm/linear_svc.py +108 -135
- snowflake/ml/modeling/svm/linear_svr.py +108 -135
- snowflake/ml/modeling/svm/nu_svc.py +108 -135
- snowflake/ml/modeling/svm/nu_svr.py +108 -135
- snowflake/ml/modeling/svm/svc.py +108 -135
- snowflake/ml/modeling/svm/svr.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/decision_tree_regressor.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_classifier.py +108 -135
- snowflake/ml/modeling/tree/extra_tree_regressor.py +108 -135
- snowflake/ml/modeling/xgboost/xgb_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgb_regressor.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_classifier.py +108 -136
- snowflake/ml/modeling/xgboost/xgbrf_regressor.py +108 -136
- snowflake/ml/registry/model_registry.py +2 -0
- snowflake/ml/registry/registry.py +215 -0
- snowflake/ml/version.py +1 -1
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/METADATA +34 -1
- snowflake_ml_python-1.1.2.dist-info/RECORD +347 -0
- snowflake_ml_python-1.1.0.dist-info/RECORD +0 -331
- {snowflake_ml_python-1.1.0.dist-info → snowflake_ml_python-1.1.2.dist-info}/WHEEL +0 -0
snowflake/ml/modeling/cluster/spectral_biclustering.py

@@ -22,17 +22,19 @@ from sklearn.utils.metaestimators import available_if
 from snowflake.ml.modeling.framework.base import BaseTransformer, _process_cols
 from snowflake.ml._internal import telemetry
 from snowflake.ml._internal.exceptions import error_codes, exceptions, modeling_error_messages
+from snowflake.ml._internal.env_utils import SNOWML_SPROC_ENV
 from snowflake.ml._internal.utils import pkg_version_utils, identifier
-from snowflake.snowpark import DataFrame
+from snowflake.snowpark import DataFrame, Session
 from snowflake.snowpark._internal.type_utils import convert_sp_to_sf_type
 from snowflake.ml.modeling._internal.snowpark_handlers import SnowparkHandlers as HandlersImpl
+from snowflake.ml.modeling._internal.model_trainer_builder import ModelTrainerBuilder
+from snowflake.ml.modeling._internal.model_trainer import ModelTrainer
 from snowflake.ml.modeling._internal.estimator_utils import (
     gather_dependencies,
     original_estimator_has_callable,
     transform_snowml_obj_to_sklearn_obj,
     validate_sklearn_args,
 )
-from snowflake.ml.modeling._internal.snowpark_handlers import SklearnWrapperProvider
 from snowflake.ml.modeling._internal.estimator_protocols import FitPredictHandlers

 from snowflake.ml.model.model_signature import (
@@ -52,7 +54,6 @@ _PROJECT = "ModelDevelopment"
 _SUBPROJECT = "".join([s.capitalize() for s in "sklearn.cluster".replace("sklearn.", "").split("_")])


-
 class SpectralBiclustering(BaseTransformer):
     r"""Spectral biclustering (Kluger, 2003)
     For more details on this class, see [sklearn.cluster.SpectralBiclustering]
@@ -60,6 +61,49 @@ class SpectralBiclustering(BaseTransformer):

     Parameters
     ----------
+
+    input_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that contain features.
+        If this parameter is not specified, all columns in the input DataFrame except
+        the columns specified by label_cols, sample_weight_col, and passthrough_cols
+        parameters are considered input columns. Input columns can also be set after
+        initialization with the `set_input_cols` method.
+
+    label_cols: Optional[Union[str, List[str]]]
+        This parameter is optional and will be ignored during fit. It is present here for API consistency by convention.
+
+    output_cols: Optional[Union[str, List[str]]]
+        A string or list of strings representing column names that will store the
+        output of predict and transform operations. The length of output_cols must
+        match the expected number of output columns from the specific predictor or
+        transformer class used.
+        If you omit this parameter, output column names are derived by adding an
+        OUTPUT_ prefix to the label column names for supervised estimators, or
+        OUTPUT_<IDX>for unsupervised estimators. These inferred output column names
+        work for predictors, but output_cols must be set explicitly for transformers.
+        In general, explicitly specifying output column names is clearer, especially
+        if you don’t specify the input column names.
+        To transform in place, pass the same names for input_cols and output_cols.
+        be set explicitly for transformers. Output columns can also be set after
+        initialization with the `set_output_cols` method.
+
+    sample_weight_col: Optional[str]
+        A string representing the column name containing the sample weights.
+        This argument is only required when working with weighted datasets. Sample
+        weight column can also be set after initialization with the
+        `set_sample_weight_col` method.
+
+    passthrough_cols: Optional[Union[str, List[str]]]
+        A string or a list of strings indicating column names to be excluded from any
+        operations (such as train, transform, or inference). These specified column(s)
+        will remain untouched throughout the process. This option is helpful in scenarios
+        requiring automatic input_cols inference, but need to avoid using specific
+        columns, like index columns, during training or inference. Passthrough columns
+        can also be set after initialization with the `set_passthrough_cols` method.
+
+    drop_input_cols: Optional[bool], default=False
+        If set, the response of predict(), transform() methods will not contain input columns.
+
     n_clusters: int or tuple (n_row_clusters, n_column_clusters), default=3
         The number of row and column clusters in the checkerboard
         structure.
@@ -111,35 +155,6 @@ class SpectralBiclustering(BaseTransformer):
         Used for randomizing the singular value decomposition and the k-means
         initialization. Use an int to make the randomness deterministic.
         See :term:`Glossary <random_state>`.
-
-    input_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain features.
-        If this parameter is not specified, all columns in the input DataFrame except
-        the columns specified by label_cols and sample_weight_col parameters are
-        considered input columns.
-
-    label_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that contain labels.
-        This is a required param for estimators, as there is no way to infer these
-        columns. If this parameter is not specified, then object is fitted without
-        labels (like a transformer).
-
-    output_cols: Optional[Union[str, List[str]]]
-        A string or list of strings representing column names that will store the
-        output of predict and transform operations. The length of output_cols must
-        match the expected number of output columns from the specific estimator or
-        transformer class used.
-        If this parameter is not specified, output column names are derived by
-        adding an OUTPUT_ prefix to the label column names. These inferred output
-        column names work for estimator's predict() method, but output_cols must
-        be set explicitly for transformers.
-
-    sample_weight_col: Optional[str]
-        A string representing the column name containing the sample weights.
-        This argument is only required when working with weighted datasets.
-
-    drop_input_cols: Optional[bool], default=False
-        If set, the response of predict(), transform() methods will not contain input columns.
     """

     def __init__(  # type: ignore[no-untyped-def]
@@ -158,6 +173,7 @@ class SpectralBiclustering(BaseTransformer):
         input_cols: Optional[Union[str, Iterable[str]]] = None,
         output_cols: Optional[Union[str, Iterable[str]]] = None,
         label_cols: Optional[Union[str, Iterable[str]]] = None,
+        passthrough_cols: Optional[Union[str, Iterable[str]]] = None,
         drop_input_cols: Optional[bool] = False,
         sample_weight_col: Optional[str] = None,
     ) -> None:
@@ -166,9 +182,10 @@ class SpectralBiclustering(BaseTransformer):
         self.set_input_cols(input_cols)
         self.set_output_cols(output_cols)
         self.set_label_cols(label_cols)
+        self.set_passthrough_cols(passthrough_cols)
         self.set_drop_input_cols(drop_input_cols)
         self.set_sample_weight_col(sample_weight_col)
-        deps = set(
+        deps: Set[str] = set([f'numpy=={np.__version__}', f'scikit-learn=={sklearn.__version__}', f'cloudpickle=={cp.__version__}'])

         self._deps = list(deps)

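The constructor hunks above introduce the new `passthrough_cols` option next to the existing column parameters. A minimal usage sketch with hypothetical column names (each option can also be changed later through the corresponding setter, as the docstring notes):

```python
# Minimal sketch of the 1.1.2 constructor options shown above; column names and
# n_clusters are illustrative only, not taken from this diff.
from snowflake.ml.modeling.cluster import SpectralBiclustering

model = SpectralBiclustering(
    n_clusters=2,
    input_cols=["F1", "F2"],       # feature columns
    output_cols=["OUTPUT_0"],      # where results would be written
    passthrough_cols=["ROW_ID"],   # new: excluded from training, returned untouched
    drop_input_cols=False,
)
# Equivalent post-construction configuration via the setters referenced in the docstring:
model.set_passthrough_cols(["ROW_ID"])
```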
@@ -186,13 +203,14 @@ class SpectralBiclustering(BaseTransformer):
             args=init_args,
             klass=sklearn.cluster.SpectralBiclustering
         )
-        self._sklearn_object = sklearn.cluster.SpectralBiclustering(
+        self._sklearn_object: Any = sklearn.cluster.SpectralBiclustering(
             **cleaned_up_init_args,
         )
         self._model_signature_dict: Optional[Dict[str, ModelSignature]] = None
         # If user used snowpark dataframe during fit, here it stores the snowpark input_cols, otherwise the processed input_cols
         self._snowpark_cols: Optional[List[str]] = self.input_cols
-        self._handlers: FitPredictHandlers = HandlersImpl(class_name=SpectralBiclustering.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True
+        self._handlers: FitPredictHandlers = HandlersImpl(class_name=SpectralBiclustering.__class__.__name__, subproject=_SUBPROJECT, autogenerated=True)
+        self._autogenerated = True

     def _get_rand_id(self) -> str:
         """
@@ -203,24 +221,6 @@ class SpectralBiclustering(BaseTransformer):
         """
         return str(uuid4()).replace("-", "_").upper()

-    def _infer_input_output_cols(self, dataset: Union[DataFrame, pd.DataFrame]) -> None:
-        """
-        Infer `self.input_cols` and `self.output_cols` if they are not explicitly set.
-
-        Args:
-            dataset: Input dataset.
-        """
-        if not self.input_cols:
-            cols = [
-                c for c in dataset.columns
-                if c not in self.get_label_cols() and c != self.sample_weight_col
-            ]
-            self.set_input_cols(input_cols=cols)
-
-        if not self.output_cols:
-            cols = [identifier.concat_names(ids=['OUTPUT_', c]) for c in self.label_cols]
-            self.set_output_cols(output_cols=cols)
-
     def set_input_cols(self, input_cols: Optional[Union[str, Iterable[str]]]) -> "SpectralBiclustering":
         """
         Input columns setter.
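The `_infer_input_output_cols` helper removed here is still used by this class: the `fit` method below continues to call it, and the file list shows `snowflake/ml/modeling/framework/base.py` growing by 83 lines, so the shared `BaseTransformer` apparently hosts the inference now. A small sketch of the inference rule the new docstring describes, using made-up column names:

```python
# Hedged illustration of the column-inference rule described in the new docstring;
# the dataset and column names are hypothetical.
dataset_columns = ["ROW_ID", "F1", "F2", "TARGET", "WEIGHT"]
label_cols = ["TARGET"]
sample_weight_col = "WEIGHT"
passthrough_cols = ["ROW_ID"]

# Inputs: every column that is not a label, sample-weight, or passthrough column.
input_cols = [
    c for c in dataset_columns
    if c not in label_cols and c != sample_weight_col and c not in passthrough_cols
]
# Outputs: OUTPUT_-prefixed label names for supervised estimators (OUTPUT_<IDX> otherwise).
output_cols = ["OUTPUT_" + c for c in label_cols]

assert input_cols == ["F1", "F2"]
assert output_cols == ["OUTPUT_TARGET"]
```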
@@ -266,54 +266,48 @@ class SpectralBiclustering(BaseTransformer):
             self
         """
         self._infer_input_output_cols(dataset)
-        if isinstance(dataset,
-
-
-
-
-
-        self.
-
-
-
-
-
-
-
-
-
+        if isinstance(dataset, DataFrame):
+            session = dataset._session
+            assert session is not None  # keep mypy happy
+            # Validate that key package version in user workspace are supported in snowflake conda channel
+            # If customer doesn't have package in conda channel, replace the ones have the closest versions
+            self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
+                pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
+
+            # Specify input columns so column pruning will be enforced
+            selected_cols = self._get_active_columns()
+            if len(selected_cols) > 0:
+                dataset = dataset.select(selected_cols)
+
+            self._snowpark_cols = dataset.select(self.input_cols).columns
+
+        # If we are already in a stored procedure, no need to kick off another one.
+        if SNOWML_SPROC_ENV in os.environ:
+            statement_params = telemetry.get_function_usage_statement_params(
+                project=_PROJECT,
+                subproject=_SUBPROJECT,
+                function_name=telemetry.get_statement_params_full_func_name(inspect.currentframe(), SpectralBiclustering.__class__.__name__),
+                api_calls=[Session.call],
+                custom_tags=dict([("autogen", True)]) if self._autogenerated else None,
+            )
+            pd_df: pd.DataFrame = dataset.to_pandas(statement_params=statement_params)
+            pd_df.columns = dataset.columns
+            dataset = pd_df
+
+        model_trainer = ModelTrainerBuilder.build(
+            estimator=self._sklearn_object,
+            dataset=dataset,
+            input_cols=self.input_cols,
+            label_cols=self.label_cols,
+            sample_weight_col=self.sample_weight_col,
+            autogenerated=self._autogenerated,
+            subproject=_SUBPROJECT
+        )
+        self._sklearn_object = model_trainer.train()
         self._is_fitted = True
         self._get_model_signatures(dataset)
         return self

-    def _fit_snowpark(self, dataset: DataFrame) -> None:
-        session = dataset._session
-        assert session is not None  # keep mypy happy
-        # Validate that key package version in user workspace are supported in snowflake conda channel
-        # If customer doesn't have package in conda channel, replace the ones have the closest versions
-        self._deps = pkg_version_utils.get_valid_pkg_versions_supported_in_snowflake_conda_channel(
-            pkg_versions=self._get_dependencies(), session=session, subproject=_SUBPROJECT)
-
-        # Specify input columns so column pruning will be enforced
-        selected_cols = self._get_active_columns()
-        if len(selected_cols) > 0:
-            dataset = dataset.select(selected_cols)
-
-        estimator = self._sklearn_object
-        assert estimator is not None  # Keep mypy happy
-
-        self._snowpark_cols = dataset.select(self.input_cols).columns
-
-        self._sklearn_object = self._handlers.fit_snowpark(
-            dataset,
-            session,
-            estimator,
-            ["snowflake-snowpark-python"] + self._get_dependencies(),
-            self.input_cols,
-            self.label_cols,
-            self.sample_weight_col,
-        )
-
     def _get_pass_through_columns(self, dataset: DataFrame) -> List[str]:
         if self._drop_input_cols:
             return []
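The rewritten `fit` above folds the old `_fit_snowpark` helper into a single flow: validate dependency versions and prune columns for Snowpark input, convert to pandas when already running inside a stored procedure, then hand training to `ModelTrainerBuilder` (backed by the new `pandas_trainer.py` and `snowpark_trainer.py` modules in the file list). A hedged sketch of what this looks like from the caller's side; the data, connection parameters, and table name are invented for illustration:

```python
# Hedged usage sketch of the new fit() flow; the data, connection parameters, and
# table name below are hypothetical, not part of this diff.
import pandas as pd
from snowflake.ml.modeling.cluster import SpectralBiclustering

model = SpectralBiclustering(n_clusters=2, random_state=0, input_cols=["F1", "F2"])

# pandas input: trained in-process by the pandas trainer.
pdf = pd.DataFrame({"F1": [1, 2, 1, 4, 3, 3], "F2": [1, 1, 0, 7, 5, 6]})
model.fit(pdf)

# Snowpark input: dependency versions are checked against the Snowflake conda channel and
# training runs in a stored procedure, unless SNOWML_SPROC_ENV says we are already in one.
# from snowflake.snowpark import Session
# session = Session.builder.configs(connection_parameters).create()   # hypothetical connection
# model.fit(session.table("MY_DB.MY_SCHEMA.MY_FEATURES"))             # hypothetical table
```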
@@ -501,11 +495,6 @@ class SpectralBiclustering(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.

@@ -557,11 +546,6 @@ class SpectralBiclustering(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def transform(self, dataset: Union[DataFrame, pd.DataFrame]) -> Union[DataFrame, pd.DataFrame]:
         """Method not supported for this class.

@@ -618,7 +602,8 @@ class SpectralBiclustering(BaseTransformer):
         if False:
             self.fit(dataset)
             assert self._sklearn_object is not None
-
+            labels : npt.NDArray[Any] = self._sklearn_object.labels_
+            return labels
         else:
             raise NotImplementedError

@@ -654,6 +639,7 @@ class SpectralBiclustering(BaseTransformer):
             output_cols = []

         # Make sure column names are valid snowflake identifiers.
+        assert output_cols is not None  # Make MyPy happy
         rv = [identifier.rename_to_valid_snowflake_identifier(c) for c in output_cols]

         return rv
@@ -664,11 +650,6 @@ class SpectralBiclustering(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -709,11 +690,6 @@ class SpectralBiclustering(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def predict_log_proba(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "predict_log_proba_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -750,16 +726,6 @@ class SpectralBiclustering(BaseTransformer):
         return output_df

     @available_if(original_estimator_has_callable("decision_function"))  # type: ignore[misc]
-    @telemetry.send_api_usage_telemetry(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def decision_function(
         self, dataset: Union[DataFrame, pd.DataFrame], output_cols_prefix: str = "decision_function_"
     ) -> Union[DataFrame, pd.DataFrame]:
@@ -858,11 +824,6 @@ class SpectralBiclustering(BaseTransformer):
         subproject=_SUBPROJECT,
         custom_tags=dict([("autogen", True)]),
     )
-    @telemetry.add_stmt_params_to_df(
-        project=_PROJECT,
-        subproject=_SUBPROJECT,
-        custom_tags=dict([("autogen", True)]),
-    )
     def kneighbors(
         self,
         dataset: Union[DataFrame, pd.DataFrame],
@@ -922,18 +883,28 @@ class SpectralBiclustering(BaseTransformer):
         # For classifier, the type of predict is the same as the type of label
         if self._sklearn_object._estimator_type == 'classifier':
             # label columns is the desired type for output
-            outputs = _infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True)
+            outputs = list(_infer_signature(dataset[self.label_cols], "output", use_snowflake_identifiers=True))
             # rename the output columns
-            outputs = model_signature_utils.rename_features(outputs, self.output_cols)
+            outputs = list(model_signature_utils.rename_features(outputs, self.output_cols))
+            self._model_signature_dict["predict"] = ModelSignature(inputs,
+                                                                    ([] if self._drop_input_cols else inputs)
+                                                                    + outputs)
+        # For mixture models that use the density mixin, `predict` returns the argmax of the log prob.
+        # For outlier models, returns -1 for outliers and 1 for inliers.
+        # Clusterer returns int64 cluster labels.
+        elif self._sklearn_object._estimator_type in ["DensityEstimator", "clusterer", "outlier_detector"]:
+            outputs = [FeatureSpec(dtype=DataType.INT64, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                     ([] if self._drop_input_cols else inputs)
                                                                     + outputs)
+
         # For regressor, the type of predict is float64
         elif self._sklearn_object._estimator_type == 'regressor':
             outputs = [FeatureSpec(dtype=DataType.DOUBLE, name=c) for c in self.output_cols]
             self._model_signature_dict["predict"] = ModelSignature(inputs,
                                                                     ([] if self._drop_input_cols else inputs)
                                                                     + outputs)
+
         for prob_func in PROB_FUNCTIONS:
             if hasattr(self, prob_func):
                 output_cols_prefix: str = f"{prob_func}_"
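The final hunk extends signature inference so that clusterers, outlier detectors, and density estimators get INT64-typed prediction outputs instead of falling through the classifier/regressor branches. A rough sketch, with hypothetical feature names, of the kind of `ModelSignature` that branch builds:

```python
# Rough sketch of the signature shape the new clusterer/outlier/density branch produces;
# feature names are hypothetical.
from snowflake.ml.model.model_signature import DataType, FeatureSpec, ModelSignature

inputs = [
    FeatureSpec(dtype=DataType.DOUBLE, name="F1"),
    FeatureSpec(dtype=DataType.DOUBLE, name="F2"),
]
# Cluster labels are integers, hence INT64 output specs named after output_cols.
outputs = [FeatureSpec(dtype=DataType.INT64, name="OUTPUT_0")]

# Inputs are echoed into the output signature unless drop_input_cols is set.
predict_signature = ModelSignature(inputs=inputs, outputs=inputs + outputs)
```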